source
stringlengths
3
92
c
stringlengths
26
2.25M
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/shear.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" /* Numerous internal routines for image distortions. 
*/ static inline void AffineArgsToCoefficients(double *affine) { /* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */ double tmp[4]; /* note indexes 0 and 5 remain unchanged */ tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4]; affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3]; } static inline void CoefficientsToAffineArgs(double *coeff) { /* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */ double tmp[4]; /* note indexes 0 and 5 remain unchanged */ tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2]; coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3]; } static void InvertAffineCoefficients(const double *coeff,double *inverse) { /* From "Digital Image Warping" by George Wolberg, page 50 */ double determinant; determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]); inverse[0]=determinant*coeff[4]; inverse[1]=determinant*(-coeff[1]); inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]); inverse[3]=determinant*(-coeff[3]); inverse[4]=determinant*coeff[0]; inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]); } static void InvertPerspectiveCoefficients(const double *coeff, double *inverse) { /* From "Digital Image Warping" by George Wolberg, page 53 */ double determinant; determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]); inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]); inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]); inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]); inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]); inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]); inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]); inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]); inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]); } /* * Polynomial Term Defining Functions * * Order must either be an integer, or 1.5 to produce * the 2 number_valuesal polynomial function... 
* affine 1 (3) u = c0 + c1*x + c2*y * bilinear 1.5 (4) u = '' + c3*x*y * quadratic 2 (6) u = '' + c4*x*x + c5*y*y * cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3 * quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4 * quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5 * number in parenthesis minimum number of points needed. * Anything beyond quintic, has not been implemented until * a more automated way of determining terms is found. * Note the slight re-ordering of the terms for a quadratic polynomial * which is to allow the use of a bi-linear (order=1.5) polynomial. * All the later polynomials are ordered simply from x^N to y^N */ static size_t poly_number_terms(double order) { /* Return the number of terms for a 2d polynomial */ if ( order < 1 || order > 5 || ( order != floor(order) && (order-1.5) > MagickEpsilon) ) return 0; /* invalid polynomial order */ return((size_t) floor((order+1)*(order+2)/2)); } static double poly_basis_fn(ssize_t n, double x, double y) { /* Return the result for this polynomial term */ switch(n) { case 0: return( 1.0 ); /* constant */ case 1: return( x ); case 2: return( y ); /* affine order = 1 terms = 3 */ case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x*x ); case 5: return( y*y ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x*x ); case 7: return( x*x*y ); case 8: return( x*y*y ); case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x*x ); case 11: return( x*x*x*y ); case 12: return( x*x*y*y ); case 13: return( x*y*y*y ); case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x*x ); case 16: return( x*x*x*x*y ); case 17: return( x*x*x*y*y ); case 18: return( x*x*y*y*y ); case 19: return( x*y*y*y*y ); case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */ } return( 0 ); /* should never happen */ } static const char *poly_basis_str(ssize_t n) { /* return the result for this polynomial term */ switch(n) { case 0: return(""); 
/* constant */ case 1: return("*ii"); case 2: return("*jj"); /* affine order = 1 terms = 3 */ case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */ case 4: return("*ii*ii"); case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */ case 6: return("*ii*ii*ii"); case 7: return("*ii*ii*jj"); case 8: return("*ii*jj*jj"); case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */ case 10: return("*ii*ii*ii*ii"); case 11: return("*ii*ii*ii*jj"); case 12: return("*ii*ii*jj*jj"); case 13: return("*ii*jj*jj*jj"); case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */ case 15: return("*ii*ii*ii*ii*ii"); case 16: return("*ii*ii*ii*ii*jj"); case 17: return("*ii*ii*ii*jj*jj"); case 18: return("*ii*ii*jj*jj*jj"); case 19: return("*ii*jj*jj*jj*jj"); case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */ } return( "UNKNOWN" ); /* should never happen */ } static double poly_basis_dx(ssize_t n, double x, double y) { /* polynomial term for x derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 1.0 ); case 2: return( 0.0 ); /* affine order = 1 terms = 3 */ case 3: return( y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x ); case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x ); case 7: return( x*y ); case 8: return( y*y ); case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x ); case 11: return( x*x*y ); case 12: return( x*y*y ); case 13: return( y*y*y ); case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x ); case 16: return( x*x*x*y ); case 17: return( x*x*y*y ); case 18: return( x*y*y*y ); case 19: return( y*y*y*y ); case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */ } return( 0.0 ); /* should never happen */ } static double poly_basis_dy(ssize_t n, double x, double y) { /* polynomial term for y derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 0.0 ); case 2: return( 1.0 ); /* 
affine order = 1 terms = 3 */ case 3: return( x ); /* bilinear order = 1.5 terms = 4 */ case 4: return( 0.0 ); case 5: return( y ); /* quadratic order = 2 terms = 6 */ default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */ } /* NOTE: the only reason that last is not true for 'quadratic' is due to the re-arrangement of terms to allow for 'bilinear' */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n e T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffineTransformImage() transforms an image as dictated by the affine matrix. % It allocates the memory necessary for the new Image structure and returns % a pointer to the new image. % % The format of the AffineTransformImage method is: % % Image *AffineTransformImage(const Image *image, % AffineMatrix *affine_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o affine_matrix: the affine matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AffineTransformImage(const Image *image, const AffineMatrix *affine_matrix,ExceptionInfo *exception) { double distort[6]; Image *deskew_image; /* Affine transform image. 
*/ assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(affine_matrix != (AffineMatrix *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); distort[0]=affine_matrix->sx; distort[1]=affine_matrix->rx; distort[2]=affine_matrix->ry; distort[3]=affine_matrix->sy; distort[4]=affine_matrix->tx; distort[5]=affine_matrix->ty; deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort, MagickTrue,exception); return(deskew_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e n e r a t e C o e f f i c i e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GenerateCoefficients() takes user provided input arguments and generates % the coefficients, needed to apply the specific distortion for either % distorting images (generally using control points) or generating a color % gradient from sparsely separated color points. % % The format of the GenerateCoefficients() method is: % % Image *GenerateCoefficients(const Image *image,DistortMethod method, % const size_t number_arguments,const double *arguments, % size_t number_values, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion/ sparse gradient % % o number_arguments: the number of arguments given. % % o arguments: the arguments for this distortion method. 
% % o number_values: the style and format of given control points, (caller type) % 0: 2 dimensional mapping of control points (Distort) % Format: u,v,x,y where u,v is the 'source' of the % the color to be plotted, for DistortImage() % N: Interpolation of control points with N values (usally r,g,b) % Format: x,y,r,g,b mapping x,y to color values r,g,b % IN future, variable number of values may be given (1 to N) % % o exception: return any errors or warnings in this structure % % Note that the returned array of double values must be freed by the % calling method using RelinquishMagickMemory(). This however may change in % the future to require a more 'method' specific method. % % Because of this this method should not be classed as stable or used % outside other MagickCore library methods. */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static double *GenerateCoefficients(const Image *image, DistortMethod *method,const size_t number_arguments,const double *arguments, size_t number_values,ExceptionInfo *exception) { double *coeff; register size_t i; size_t number_coeff, /* number of coefficients to return (array size) */ cp_size, /* number floating point numbers per control point */ cp_x,cp_y, /* the x,y indexes for control point */ cp_values; /* index of values for this control point */ /* number_values Number of values given per control point */ if ( number_values == 0 ) { /* Image distortion using control points (or other distortion) That is generate a mapping so that x,y->u,v given u,v,x,y */ number_values = 2; /* special case: two values of u,v */ cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */ cp_x = 2; /* location of x,y in input control values */ cp_y = 3; /* NOTE: cp_values, also used for later 'reverse map distort' tests */ } else { cp_x = 0; /* location of x,y in input control values */ cp_y = 1; cp_values = 2; /* and the 
other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: number_coeff=8; break; case 
PerspectiveDistortion: case PerspectiveProjectionDistortion: number_coeff=9; break; case BarrelDistortion: case BarrelInverseDistortion: number_coeff=10; break; default: perror("unknown method given"); /* just fail assertion */ } /* allocate the array of coefficients needed */ coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff)); if (coeff == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "GenerateCoefficients"); return((double *) NULL); } /* zero out coefficients array */ for (i=0; i < number_coeff; i++) coeff[i] = 0.0; switch (*method) { case AffineDistortion: { /* Affine Distortion v = c0*x + c1*y + c2 for each 'value' given Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Affine", 1.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* handle special cases of not enough arguments */ if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */ if ( cp_values == 0 ) { /* image distortion - translate the image */ coeff[0] = 1.0; coeff[2] = arguments[0] - arguments[2]; coeff[4] = 1.0; coeff[5] = arguments[1] - arguments[3]; } else { /* sparse gradient - use the values directly */ for (i=0; i<number_values; i++) coeff[i*3+2] = arguments[cp_values+i]; } } else { /* 2 or more points (usally 3) given. Solve a least squares simultaneous equation for coefficients. 
*/ double **matrix, **vectors, terms[3]; MagickBooleanType status; /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(3UL,3UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*3]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),3UL,number_values); } if ( number_arguments == 2*cp_size ) { /* Only two pairs were given, but we need 3 to solve the affine. Fake extra coordinates by rotating p1 around p0 by 90 degrees. 
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0) */ terms[0] = arguments[cp_x] - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */ terms[1] = arguments[cp_y] + + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */ terms[2] = 1; /* 1 */ if ( cp_values == 0 ) { /* Image Distortion - rotate the u,v coordients too */ double uv2[2]; uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */ uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */ LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL); } else { /* Sparse Gradient - use values of p0 for linear gradient */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[cp_values]),3UL,number_values); } } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,3UL,number_values); matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } } return(coeff); } case AffineProjectionDistortion: { /* Arguments: Affine Matrix (forward mapping) Arguments sx, rx, ry, sy, tx, ty Where u = sx*x + ry*y + tx v = rx*x + sy*y + ty Returns coefficients (in there inverse form) ordered as... sx ry tx rx sy ty AffineProjection Distortion Notes... 
+ Will only work with a 2 number_values for Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ double inverse[8]; if (number_arguments != 6) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs 6 coeff values'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */ for(i=0; i<6UL; i++ ) inverse[i] = arguments[i]; AffineArgsToCoefficients(inverse); /* map into coefficents */ InvertAffineCoefficients(inverse, coeff); /* invert */ *method = AffineDistortion; return(coeff); } case ScaleRotateTranslateDistortion: { /* Scale, Rotate and Translate Distortion An alternative Affine Distortion Argument options, by number of arguments given: 7: x,y, sx,sy, a, nx,ny 6: x,y, s, a, nx,ny 5: x,y, sx,sy, a 4: x,y, s, a 3: x,y, a 2: s, a 1: a Where actions are (in order of application) x,y 'center' of transforms (default = image center) sx,sy scale image by this amount (default = 1) a angle of rotation (argument required) nx,ny move 'center' here (default = x,y or no movement) And convert to affine mapping coefficients ScaleRotateTranslate Distortion Notes... 
+ Does not use a set of CPs in any normal way + Will only work with a 2 number_valuesal Image Distortion + Cannot be used for generating a sparse gradient (interpolation) */ double cosine, sine, x,y,sx,sy,a,nx,ny; /* set default center, and default scale */ x = nx = (double)(image->columns)/2.0 + (double)image->page.x; y = ny = (double)(image->rows)/2.0 + (double)image->page.y; sx = sy = 1.0; switch ( number_arguments ) { case 0: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs at least 1 argument'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); case 1: a = arguments[0]; break; case 2: sx = sy = arguments[0]; a = arguments[1]; break; default: x = nx = arguments[0]; y = ny = arguments[1]; switch ( number_arguments ) { case 3: a = arguments[2]; break; case 4: sx = sy = arguments[2]; a = arguments[3]; break; case 5: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; break; case 6: sx = sy = arguments[2]; a = arguments[3]; nx = arguments[4]; ny = arguments[5]; break; case 7: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; nx = arguments[5]; ny = arguments[6]; break; default: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Too Many Arguments (7 or less)'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } break; } /* Trap if sx or sy == 0 -- image is scaled out of existance! 
*/ if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Zero Scale Given'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Save the given arguments as an affine distortion */ a=DegreesToRadians(a); cosine=cos(a); sine=sin(a); *method = AffineDistortion; coeff[0]=cosine/sx; coeff[1]=sine/sx; coeff[2]=x-nx*coeff[0]-ny*coeff[1]; coeff[3]=(-sine)/sy; coeff[4]=cosine/sy; coeff[5]=y-nx*coeff[3]-ny*coeff[4]; return(coeff); } case PerspectiveDistortion: { /* Perspective Distortion (a ratio of affine distortions) p(x,y) c0*x + c1*y + c2 u = ------ = ------------------ r(x,y) c6*x + c7*y + 1 q(x,y) c3*x + c4*y + c5 v = ------ = ------------------ r(x,y) c6*x + c7*y + 1 c8 = Sign of 'r', or the denominator affine, for the actual image. This determines what part of the distorted image is 'ground' side of the horizon, the other part is 'sky' or invalid. Valid values are +1.0 or -1.0 only. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... Perspective Distortion Notes... + Can be thought of as ratio of 3 affine transformations + Not separatable: r() or c6 and c7 are used by both equations + All 8 coefficients must be determined simultaniously + Will only work with a 2 number_valuesal Image Distortion + Can not be used for generating a sparse gradient (interpolation) + It is not linear, but is simple to generate an inverse + All lines within an image remain lines. + but distances between points may vary. 
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) 
); return((double *) NULL); } /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... 
*/ double **matrix, **vectors, terms[4]; MagickBooleanType status; /* check the number of arguments */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(4UL,4UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x4 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*4]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = terms[0]*terms[1]; /* x*y */ terms[3] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),4UL,number_values); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,4UL,number_values); matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( *method == BilinearForwardDistortion ) { /* Bilinear Forward 
Mapped Distortion The above least-squares solved for coefficents but in the forward direction, due to changes to indexing constants. i = c0*x + c1*y + c2*x*y + c3; j = c4*x + c5*y + c6*x*y + c7; where i,j are in the destination image, NOT the source. Reverse Pixel mapping however needs to use reverse of these functions. It required a full page of algbra to work out the reversed mapping formula, but resolves down to the following... c8 = c0*c5-c1*c4; c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula i = i - c3; j = j - c7; b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0 c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a) r = b*b - c9*(c+c); if ( c9 != 0 ) y = ( -b + sqrt(r) ) / c9; else y = -c/b; x = ( i - c1*y) / ( c1 - c2*y ); NB: if 'r' is negative there is no solution! NB: the sign of the sqrt() should be negative if image becomes flipped or flopped, or crosses over itself. NB: techniqually coefficient c5 is not needed, anymore, but kept for completness. See Anthony Thyssen <A.Thyssen@griffith.edu.au> or Fred Weinhaus <fmw@alink.net> for more details. */ coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4]; coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]); } return(coeff); } #if 0 case QuadrilateralDistortion: { /* Map a Quadrilateral to a unit square using BilinearReverse Then map that unit square back to the final Quadrilateral using BilinearForward. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ /* UNDER CONSTRUCTION */ return(coeff); } #endif case PolynomialDistortion: { /* Polynomial Distortion First two coefficents are used to hole global polynomal information c0 = Order of the polynimial being created c1 = number_of_terms in one polynomial equation Rest of the coefficients map to the equations.... v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ... for each 'value' (number_values of them) given. 
As such total coefficients = 2 + number_terms * number_values Input Arguments are sets of control points... For Distort Images order [u,v, x,y] ... For Sparse Gradients order [x,y, r,g,b] ... Polynomial Distortion Notes... + UNDER DEVELOPMENT -- Do not expect this to remain as is. + Currently polynomial is a reversed mapped distortion. + Order 1.5 is fudged to map into a bilinear distortion. though it is not the same order as that distortion. */ double **matrix, **vectors, *terms; size_t nterms; /* number of polynomial terms per number_values */ register ssize_t j; MagickBooleanType status; /* first two coefficients hold polynomial order information */ coeff[0] = arguments[0]; coeff[1] = (double) poly_number_terms(arguments[0]); nterms = (size_t) coeff[1]; /* create matrix, a fake vectors matrix, and least sqs terms */ matrix = AcquireMagickMatrix(nterms,nterms); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms)); if (matrix == (double **) NULL || vectors == (double **) NULL || terms == (double *) NULL ) { matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); terms = (double *) RelinquishMagickMemory(terms); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[2+i*nterms]); /* Add given control point pairs for least squares solving */ for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */ for (j=0; j < (ssize_t) nterms; j++) terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]); LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),nterms,number_values); } terms = (double *) RelinquishMagickMemory(terms); /* 
Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All but first argument are optional arc_width The angle over which to arc the image side-to-side rotate Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... 
+ Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! */ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && 
*method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... 
*/ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. 
Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. */ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 
3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input 
arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /*
    Do not short-circuit this resize if final image size is unchanged: the
    distortion still applies the EWA resampling filter to the pixels.

    An affine distortion equivalent to the resize is built from three
    control-point pairs: (0,0)->(0,0), (columns,0)->(new columns,0),
    (0,rows)->(0,new rows).
  */
  (void) memset(distort_args,0,sizeof(distort_args));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it for
        handling the virtual-pixel edge effects of the distortion.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the Virtual-Pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.

        Distort the alpha channel separately (as an opaque gray-scale image).
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /*
        Distort the actual image containing alpha + VP alpha.
      */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /* bug fix: resize_alpha was previously leaked on this path */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /*
        Replace the resized image's alpha with the separately distorted
        alpha channel.
      */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop to the exact requested
    size and remove any virtual canvas geometry.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D i s t o r t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortImage() distorts an image using various distortion methods, by % mapping color lookups of the source image to a new destination image % usally of the same size as the source image, unless 'bestfit' is set to % true. % % If 'bestfit' is enabled, and distortion allows it, the destination image is % adjusted to ensure the whole source 'image' will just fit within the final % destination image, which will be sized and offset accordingly. Also in % many cases the virtual offset of the source image will be taken into % account in the mapping. % % If the '-verbose' control option has been set print to standard error the % equicelent '-fx' formula with coefficients for the function, if practical. % % The format of the DistortImage() method is: % % Image *DistortImage(const Image *image,const DistortMethod method, % const size_t number_arguments,const double *arguments, % MagickBooleanType bestfit, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion. % % ArcDistortion always ignores source image offset, and always % 'bestfit' the destination image with the top left corner offset % relative to the polar mapping center. % % Affine, Perspective, and Bilinear, do least squares fitting of the % distrotion when more than the minimum number of control point pairs % are provided. % % Perspective, and Bilinear, fall back to a Affine distortion when less % than 4 control point pairs are provided. While Affine distortions % let you use any number of control point pairs, that is Zero pairs is % a No-Op (viewport only) distortion, one pair is a translation and % two pairs of control points do a scale-rotate-translate, without any % shearing. % % o number_arguments: the number of arguments given. % % o arguments: an array of floating point arguments for this method. 
% % o bestfit: Attempt to 'bestfit' the size of the resulting image. % This also forces the resulting image to be a 'layered' virtual % canvas image. Can be overridden using 'distort:viewport' setting. % % o exception: return any errors or warnings in this structure % % Extra Controls from Image meta-data (artifacts)... % % o "verbose" % Output to stderr alternatives, internal coefficients, and FX % equivalents for the distortion operation (if feasible). % This forms an extra check of the distortion method, and allows users % access to the internal constants IM calculates for the distortion. % % o "distort:viewport" % Directly set the output image canvas area and offset to use for the % resulting image, rather than use the original images canvas, or a % calculated 'bestfit' canvas. % % o "distort:scale" % Scale the size of the output canvas by this amount to provide a % method of Zooming, and for super-sampling the results. % % Other settings that can affect results include % % o 'interpolate' For source image lookups (scale enlargements) % % o 'filter' Set filter to use for area-resampling (scale shrinking). 
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image, DistortMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; PixelInfo invalid; /* the color to assign when distort result is invalid */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if ( method == ResizeDistortion ) { if ( number_arguments != 2 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t)arguments[0], (size_t)arguments[1], exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff = GenerateCoefficients(image, &method, number_arguments, arguments, 0, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. 
*/ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; 
scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. 
*/ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { register ssize_t i; char image_gen[MagickPathExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) 
FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case PerspectiveDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr, "Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i<4; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for (; i<7; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n", coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n", coeff[8] < 0 ? 
"<" : ">", lookup); break; } case BilinearForwardDistortion: (void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", 0.5-coeff[3], 0.5-coeff[7]); (void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if ( coeff[9] != 0 ) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n", -2*coeff[9], coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n", coeff[9]); } else (void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4], coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n", -coeff[1], coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr, " (rt < 0 ) ? 
red : %s'\n", lookup); else (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case BilinearReverseDistortion: #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n", coeff[0],(unsigned long) nterms); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n yy ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n"); for ( i=0; i<5; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, 
coeff[i]); (void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n", coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n", coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", -coeff[2], -coeff[3]); (void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1], coeff[7] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] ); (void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] ); (void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] ); (void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] ); (void) 
FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc,yc; /* NOTE: This does the barrel roll in pixel coords not image coords ** The internal distortion must do it in image coordinates, ** so that is what the center coeff (8,9) is given in. */ xc = ((double)image->columns-1.0)/2.0 + image->page.x; yc = ((double)image->rows-1.0)/2.0 + image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? 
"" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n", coeff[8]-0.5, coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[4],coeff[5],coeff[6],coeff[7]); (void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s", "-set option:distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. 
*/ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); return((Image *) NULL); } /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse) { coeff=(double *) RelinquishMagickMemory(coeff); distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace,exception); if (distort_image->background_color.alpha_trait != UndefinedPixelTrait) distort_image->alpha_trait=BlendPixelTrait; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid, exception); { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. 
*/ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetPixelInfo(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; PixelInfo pixel; /* pixel color to assign to distorted image */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register ssize_t i; register Quantum *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; /* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'matte_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. 
*/ validity = 1.0; for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed 
polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) */ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... 
(see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5; if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 0 /*if ( i == 0 && j == 0 )*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... 
s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelViaPixelInfo(distort_image,&invalid,q); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel, exception); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelViaPixelInfo(distort_image,&pixel,q); } q+=GetPixelChannels(distort_image); } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DistortImage) #endif proceed=SetImageProgress(image,DistortImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. 
*/
  if ( method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
  coeff=(double *) RelinquishMagickMemory(coeff);
  return(distort_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o t a t e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  Empty triangles left
%  over from shearing the image are filled with the background color defined
%  by member 'background_color' of the image.  RotateImage allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
%  The format of the RotateImage method is:
%
%      Image *RotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: fold 'degrees' into the residual range (-45,45]
    plus a count of exact quarter turns ('rotations', taken modulo 4).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    If the residual shear components are (numerically) zero the rotation is
    an exact multiple of 90 degrees, so take the fast lossless integral
    rotate path instead of resampling.
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General case: rotate via a ScaleRotateTranslate distortion of a clone;
    areas outside the source are supplied by the background virtual-pixel
    method (i.e. filled with the background color).
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p a r s e C o l o r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SparseColorImage(), given a set of coordinates, interpolates the colors
%  found at those coordinates, across the whole image, using various methods.
%
%  The format of the SparseColorImage() method is:
%
%      Image *SparseColorImage(const Image *image,
%        const SparseColorMethod method,const size_t number_arguments,
%        const double *arguments,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be filled in.
%
%    o method: the method to fill in the gradient between the control points.
% % The methods used for SparseColor() are often simular to methods % used for DistortImage(), and even share the same code for determination % of the function coefficents, though with more dimensions (or resulting % values). % % o number_arguments: the number of arguments given. % % o arguments: array of floating point arguments for this method-- % x,y,color_values-- with color_values given as normalized values. % % o exception: return any errors or warnings in this structure % */ MagickExport Image *SparseColorImage(const Image *image, const SparseColorMethod method,const size_t number_arguments, const double *arguments,ExceptionInfo *exception) { #define SparseColorTag "Distort/SparseColor" SparseColorMethod sparse_method; double *coeff; Image *sparse_image; size_t number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Determine number of color values needed per control point */ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) number_colors++; /* Convert input arguments into mapping coefficients, this this case we are mapping (distorting) colors, rather than coordinates. 
*/ { DistortMethod distort_method; distort_method=(DistortMethod) method; if ( distort_method >= SentinelDistortion ) distort_method = ShepardsDistortion; /* Pretend to be Shepards */ coeff = GenerateCoefficients(image, &distort_method, number_arguments, arguments, number_colors, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Note some Distort Methods may fall back to other simpler methods, Currently the only fallback of concern is Bilinear to Affine (Barycentric), which is alaso sparse_colr method. This also ensures correct two and one color Barycentric handling. */ sparse_method = (SparseColorMethod) distort_method; if ( distort_method == ShepardsDistortion ) sparse_method = method; /* return non-distort methods to normal */ if ( sparse_method == InverseColorInterpolate ) coeff[0]=0.5; /* sqrt() the squared distance for inverse */ } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n"); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) (void) FormatLocaleFile(stderr, " -channel 
A -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n"); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; break; } default: /* sparse color method is too complex for FX emulation */ break; } } /* Generate new image for generated interpolated gradient. * ASIDE: Actually we could have just replaced the colors of the original * image, but IM Core policy, is if storage class could change then clone * the image. 
*/ sparse_image=CloneImage(image,0,0,MagickTrue,exception); if (sparse_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse) { /* if image is ColorMapped - change it to DirectClass */ sparse_image=DestroyImage(sparse_image); return((Image *) NULL); } { /* ----- MAIN CODE ----- */ CacheView *sparse_view; MagickBooleanType status; MagickOffsetType progress; ssize_t j; status=MagickTrue; progress=0; sparse_view=AcquireAuthenticCacheView(sparse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sparse_image,sparse_image->rows,1) #endif for (j=0; j < (ssize_t) sparse_image->rows; j++) { MagickBooleanType sync; PixelInfo pixel; /* pixel to assign to distorted image */ register ssize_t i; register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(sparse_image,&pixel); for (i=0; i < (ssize_t) image->columns; i++) { GetPixelInfoPixel(image,q,&pixel); switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; if 
((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; break; } case InverseColorInterpolate: case ShepardsColorInterpolate: { /* Inverse (Squared) Distance weights average (IDW) */ size_t k; double denominator; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=0.0; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=0.0; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=0.0; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=0.0; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=0.0; denominator = 0.0; for(k=0; k<number_arguments; k+=2+number_colors) { register ssize_t x=(ssize_t) k+2; double weight = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); weight = pow(weight,coeff[0]); /* inverse of power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red += arguments[x++]*weight; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green += arguments[x++]*weight; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue += arguments[x++]*weight; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black += arguments[x++]*weight; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha += arguments[x++]*weight; denominator += weight; } if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red/=denominator; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green/=denominator; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue/=denominator; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black/=denominator; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha/=denominator; break; } case ManhattanColorInterpolate: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! 
*/ for(k=0; k<number_arguments; k+=2+number_colors) { double distance = fabs((double)i-arguments[ k ]) + fabs((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=arguments[x++]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=arguments[x++]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=arguments[x++]; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=arguments[x++]; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=arguments[x++]; minimum = distance; } } break; } case VoronoiColorInterpolate: default: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! */ for (k=0; k<number_arguments; k+=2+number_colors) { double distance = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=arguments[x++]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=arguments[x++]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=arguments[x++]; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=arguments[x++]; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=arguments[x++]; minimum = distance; } } break; } } /* set the color directly back into the source image */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green); if 
((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha); SetPixelViaPixelInfo(sparse_image,&pixel,q); q+=GetPixelChannels(sparse_image); } sync=SyncCacheViewAuthenticPixels(sparse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SparseColorImage) #endif proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sparse_view=DestroyCacheView(sparse_view); if (status == MagickFalse) sparse_image=DestroyImage(sparse_image); } coeff = (double *) RelinquishMagickMemory(coeff); return(sparse_image); }
/* ===== distribute_simd_misc_messages.c — clang OpenMP '-verify' diagnostics test (concatenated below) ===== */
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify=expected,omp50 %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp distribute simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd safelen(4) void test_no_clause(void) { int i; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{statement after '#pragma omp distribute simd' must be a for loop}} #pragma omp distribute simd ++i; } void test_branch_protected_scope(void) { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause(void) { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd foo bar for (i = 0; i < 16; ++i) ; } void 
test_non_identifiers(void) { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(void); void test_safelen(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma 
omp distribute simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd 
simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must 
be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma 
omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} 
#pragma omp distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{integer constant expression}} #pragma omp distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+3 2 {{defined as reduction}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp distribute simd' directive may not be reduction, predetermined as lastprivate}} // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp target #pragma omp teams for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp distribute simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear( for (i = 
0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } void test_aligned(void) { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected 
')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp 
distribute simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private(void) { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd private( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this 
'('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate(void) { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate(void) { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp 
target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction(void) { int i, x, y; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction( : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp 
distribute simd reduction(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(+ for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+: for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp target 
#pragma omp teams #pragma omp distribute simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages(void) { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(k) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(val(k)) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(uval(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(ref(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(foo(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; } void test_nontemporal(void) { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in 
directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 2 {{expected expression}} #pragma omp distribute simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} omp50-error@+1 {{expected variable name}} #pragma omp distribute simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of 
undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp distribute simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp distribute simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp distribute simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp 
distribute simd'}} #pragma omp distribute simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp distribute simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} for (int i = 0; i < 10; ++i) ; #pragma omp distribute simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp distribute simd'}} for (int i = 0; i < 10; ++i) ; }
/* ===== next file in this concatenated dump: yoloDection.h ===== */
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #ifndef YOLODECTION_H #define YOLODECTION_H #include <fstream> #include <math.h> #include <vector> #include <algorithm> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include "net.h" struct BBoxRect { float xmin; float ymin; float xmax; float ymax; int label; }; class yoloDection { public: yoloDection(); yoloDection(int classN, int boxN, float conf_thre, float nms_thre, ncnn::Mat biases, ncnn::Mat mask, ncnn::Mat anchors_scale, int mask_group): num_class(classN),num_box(boxN), confidence_threshold(conf_thre), nms_threshold(nms_thre), biases(biases), mask(mask), anchors_scale(anchors_scale), mask_group_num(mask_group){} float sigmoid(float x); float intersection_area(const BBoxRect& a, const BBoxRect& b); template <typename T> void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right); template <typename T> void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores); void nms_sorted_bboxes(const std::vector<BBoxRect>& bboxes, std::vector<int>& picked, float nms_threshold); int detection(const std::vector<ncnn::Mat>& bottom_blobs, ncnn::Mat& top_blob); public: int num_class; int num_box; float confidence_threshold; float nms_threshold; ncnn::Mat 
biases; ncnn::Mat mask; ncnn::Mat anchors_scale; int mask_group_num; }; inline float yoloDection::intersection_area(const BBoxRect& a, const BBoxRect& b) { if (a.xmin > b.xmax || a.xmax < b.xmin || a.ymin > b.ymax || a.ymax < b.ymin) { // no intersection return 0.f; } float inter_width = std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin); float inter_height = std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin); return inter_width * inter_height; } template <typename T> inline void yoloDection::qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right) { int i = left; int j = right; float p = scores[(left + right) / 2]; while (i <= j) { while (scores[i] > p) i++; while (scores[j] < p) j--; if (i <= j) { // swap std::swap(datas[i], datas[j]); std::swap(scores[i], scores[j]); i++; j--; } } if (left < j) qsort_descent_inplace(datas, scores, left, j); if (i < right) qsort_descent_inplace(datas, scores, i, right); } template <typename T> inline void yoloDection::qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores) { if (datas.empty() || scores.empty()) return; qsort_descent_inplace(datas, scores, 0, scores.size() - 1); } inline void yoloDection::nms_sorted_bboxes(const std::vector<BBoxRect>& bboxes, std::vector<int>& picked, float nms_threshold) { picked.clear(); const int n = bboxes.size(); std::vector<float> areas(n); for (int i = 0; i < n; i++) { const BBoxRect& r = bboxes[i]; float width = r.xmax - r.xmin; float height = r.ymax - r.ymin; areas[i] = width * height; } for (int i = 0; i < n; i++) { const BBoxRect& a = bboxes[i]; int keep = 1; for (int j = 0; j < (int)picked.size(); j++) { const BBoxRect& b = bboxes[picked[j]]; // intersection over union float inter_area = intersection_area(a, b); float union_area = areas[i] + areas[picked[j]] - inter_area; // float IoU = inter_area / union_area if (inter_area / union_area > nms_threshold) keep = 0; } if (keep) picked.push_back(i); } } inline float 
yoloDection::sigmoid(float x) { return 1.f / (1.f + exp(-x)); } inline int yoloDection::detection(const std::vector<ncnn::Mat>& bottom_blobs, ncnn::Mat& top_blob) { // gather all box std::vector<BBoxRect> all_bbox_rects; std::vector<float> all_bbox_scores; std::ofstream fwrite; //fwrite.open("detection.txt"); for (size_t b = 0; b < bottom_blobs.size(); b++) { std::vector< std::vector<BBoxRect> > all_box_bbox_rects; std::vector< std::vector<float> > all_box_bbox_scores; all_box_bbox_rects.resize(num_box); all_box_bbox_scores.resize(num_box); const ncnn::Mat& bottom_top_blobs = bottom_blobs[b]; int w = bottom_top_blobs.w; int h = bottom_top_blobs.h; int channels = bottom_top_blobs.c; //printf("%d %d %d\n", w, h, channels); const int channels_per_box = channels / num_box; // anchor coord + box score + num_class if (channels_per_box != 4 + 1 + num_class) return -1; int mask_offset = b * num_box; int net_w = (int)(anchors_scale[b] * w); int net_h = (int)(anchors_scale[b] * h); //printf("%d %d\n", net_w, net_h); //printf("%d %d %d\n", w, h, channels); #pragma omp parallel for num_threads(4) for (int pp = 0; pp < num_box; pp++) { //fwrite << pp << ":" << std::endl; int p = pp * channels_per_box; int biases_index = mask[pp + mask_offset]; //printf("%d\n", p); const float bias_w = biases[biases_index * 2]; const float bias_h = biases[biases_index * 2 + 1]; //printf("%f %f\n", bias_w, bias_h); const float* xptr = bottom_top_blobs.channel(p); const float* yptr = bottom_top_blobs.channel(p + 1); const float* wptr = bottom_top_blobs.channel(p + 2); const float* hptr = bottom_top_blobs.channel(p + 3); //fprintf(stderr, "%f %f %f %f\n", * xptr, * yptr, * wptr, * hptr); const float* box_score_ptr = bottom_top_blobs.channel(p + 4); // softmax class scores ncnn::Mat scores = bottom_top_blobs.channel_range(p + 5, num_class); //softmax->forward_inplace(scores, opt); for (int i = 0; i < h; i++) { for (int j = 0; j < w; j++) { // box score float box_score = sigmoid(box_score_ptr[0]); // 
find class index with max class score int class_index = 0; float class_score = 0.f; for (int q = 0; q < num_class; q++) { float score = sigmoid(scores.channel(q).row(i)[j]); fwrite << score << " "; if (score > class_score) { class_index = q; class_score = score; } } //printf( "%d %f %f\n", class_index, box_score, class_score); float confidence = box_score * class_score; if (confidence > confidence_threshold) { // region box float bbox_cx = (j + sigmoid(xptr[0])) / w; float bbox_cy = (i + sigmoid(yptr[0])) / h; float bbox_w = exp(wptr[0]) * bias_w / net_w; float bbox_h = exp(hptr[0]) * bias_h / net_h; float bbox_xmin = bbox_cx - bbox_w * 0.5f; float bbox_ymin = bbox_cy - bbox_h * 0.5f; float bbox_xmax = bbox_cx + bbox_w * 0.5f; float bbox_ymax = bbox_cy + bbox_h * 0.5f; //float bbox_xmin = bbox_cx - bbox_w * 0.5f >= 0.f ? bbox_cx - bbox_w * 0.5f : 0.f; //float bbox_ymin = bbox_cy - bbox_h * 0.5f >= 0.f ? bbox_cy - bbox_h * 0.5f : 0.f; //float bbox_xmax = bbox_cx + bbox_w * 0.5f <= (float)net_w ? bbox_cx + bbox_w * 0.5f : (float)net_w; //float bbox_ymax = bbox_cy + bbox_h * 0.5f <= (float)net_h ? 
bbox_cy + bbox_h * 0.5f : (float)net_h; //fprintf(stderr, "%f %f %f %f\n", bbox_xmin, bbox_ymin, bbox_xmax,bbox_ymax); BBoxRect c = { bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax, class_index }; all_box_bbox_rects[pp].push_back(c); all_box_bbox_scores[pp].push_back(confidence); } xptr++; yptr++; wptr++; hptr++; box_score_ptr++; //fwrite << std::endl << std::endl; } } } for (int i = 0; i < num_box; i++) { const std::vector<BBoxRect>& box_bbox_rects = all_box_bbox_rects[i]; const std::vector<float>& box_bbox_scores = all_box_bbox_scores[i]; all_bbox_rects.insert(all_bbox_rects.end(), box_bbox_rects.begin(), box_bbox_rects.end()); all_bbox_scores.insert(all_bbox_scores.end(), box_bbox_scores.begin(), box_bbox_scores.end()); } } //fwrite.close(); // global sort inplace qsort_descent_inplace(all_bbox_rects, all_bbox_scores); // apply nms std::vector<int> picked; nms_sorted_bboxes(all_bbox_rects, picked, nms_threshold); // select std::vector<BBoxRect> bbox_rects; std::vector<float> bbox_scores; for (int i = 0; i < (int)picked.size(); i++) { int z = picked[i]; bbox_rects.push_back(all_bbox_rects[z]); bbox_scores.push_back(all_bbox_scores[z]); } // fill result int num_detected = bbox_rects.size(); if (num_detected == 0) return 0; top_blob.create(6, num_detected, 4u, 0); if (top_blob.empty()) return -100; for (int i = 0; i < num_detected; i++) { const BBoxRect& r = bbox_rects[i]; float score = bbox_scores[i]; float* outptr = top_blob.row(i); outptr[0] = r.label;// +1 for prepend background class outptr[1] = score; outptr[2] = r.xmin; outptr[3] = r.ymin; outptr[4] = r.xmax; outptr[5] = r.ymax; //fprintf(stderr, "%f %f\n", outptr[2], outptr[3]); } return 0; } #endif // YOLODECTION_H
/* ===== next file in this concatenated dump: sections.c ===== */
#include <stdio.h>
#include <omp.h>

/* Worker A: reports which thread of the team ran its section. */
void funcA()
{
    printf("En funcA: esta sección la ejecuta el thread %d\n", omp_get_thread_num());
}

/* Worker B: reports which thread of the team ran its section. */
void funcB()
{
    printf("En funcB: esta sección la ejecuta el thread %d\n", omp_get_thread_num());
}

/* Demonstrates the OpenMP `sections` construct: within the parallel
 * region, each `section` is executed once, by some thread of the team. */
int main()
{
    #pragma omp parallel
    {
        #pragma omp sections
        {
            #pragma omp section
            funcA();

            #pragma omp section
            funcB();
        }
    }

    return 0;
}
/* ===== next file in this concatenated dump: sbmalloc.old.c ===== */
#include "sbconfig.h" #include <errno.h> /* errno */ #include <fcntl.h> /* O_RDWR, O_CREAT, O_EXCL, open, posix_fadvise */ #include <malloc.h> /* struct mallinfo */ #include <signal.h> /* struct sigaction, siginfo_t, sigemptyset, sigaction */ #include <stdio.h> /* stderr, fprintf */ #include <stdlib.h> /* NULL */ #include <string.h> /* memset */ #include <sys/mman.h> /* mmap, munmap, madvise, mprotect */ #include <sys/resource.h> /* rlimit */ #include <sys/stat.h> /* S_IRUSR, S_IWUSR, open */ #include <sys/time.h> /* rlimit */ #include <sys/types.h> /* open */ #include <unistd.h> /* close, read, write, sysconf */ #include "sbmalloc.h" /* sbmalloc library */ /*--------------------------------------------------------------------------*/ /* BUGS: - This code needs to be updated somehow so that acct(CHARGE) always happens before load_range(). Else, data is being loaded before we know it can be safely loaded without over-committing memory. */ /*--------------------------------------------------------------------------*/ /****************************************************************************/ /* Function prototypes for hooks. */ /****************************************************************************/ extern ssize_t libc_read(int const fd, void * const buf, size_t const count); extern ssize_t libc_write(int const fd, void const * const buf, size_t const count); extern int libc_mlock(void const * const addr, size_t const len); extern int libc_munlock(void const * const addr, size_t const len); /*--------------------------------------------------------------------------*/ /****************************************************************************/ /* Constants for the accounting function. 
 */
/****************************************************************************/
/* Operation codes accepted by sb_internal_acct(); each updates a distinct
 * set of counters in sb_info (see the switch in sb_internal_acct).         */
enum sb_acct_type {
  SBACCT_READ,      /* pages read from backing file (bumps numrd)           */
  SBACCT_WRITE,     /* pages written to backing file (bumps numwr)          */
  SBACCT_WRFAULT,   /* write segfault serviced (numwr, numwf, numsf)        */
  SBACCT_RDFAULT,   /* read segfault serviced (numrd, numrf, numsf)         */
  SBACCT_ALLOC,     /* pages allocated (totpages, numpages)                 */
  SBACCT_FREE,      /* pages released (numpages)                            */
  SBACCT_CHARGE,    /* pages made resident (curpages, maxpages; may call
                       the acct_charge_cb callback)                         */
  SBACCT_DISCHARGE  /* pages made non-resident (curpages; may call the
                       acct_discharge_cb callback)                          */
};


/*--------------------------------------------------------------------------*/


/****************************************************************************/
/* Stores information associated with an external memory allocation.       */
/****************************************************************************/
struct sb_alloc
{
  size_t msize;            /* number of bytes mapped */
  size_t app_addr;         /* application handle to the shared mapping
                              (NOTE(review): appears to be a virtual address
                              carried in a size_t, not a byte count — the
                              load/sync routines add page offsets to it;
                              confirm)                                      */
  size_t len;              /* number of bytes allocated */
  size_t npages;           /* number of pages allocated */
  size_t ld_pages;         /* number of loaded pages */
  size_t ch_pages;         /* number of charged pages */
  char * pflags;           /* per-page flags vector */
#ifdef USE_CHECKSUM
  unsigned int * pchksums; /* per-page checksums vector */
#endif
  char * fname;            /* the file that will store the data */
  struct sb_alloc * next;  /* singly linked-list of allocations */
#ifdef USE_PTHREAD
  pthread_mutex_t lock;    /* mutex guarding struct */
#endif
};


/*--------------------------------------------------------------------------*/


/****************************************************************************/
/* Stores information associated with the external memory environment.
 */
/****************************************************************************/
static struct sb_info
{
  int init;                   /* initialized variable */
  int atexit_registered;      /* only register atexit function once */

  /* fault / traffic statistics */
  size_t numsf;               /* total number of segfaults */
  size_t numrf;               /* total number of read segfaults */
  size_t numwf;               /* total number of write segfaults */
  size_t numrd;               /* total number of pages read */
  size_t numwr;               /* total number of pages written */

  /* page accounting (all in sbmalloc pages, not bytes) */
  size_t curpages;            /* current pages loaded */
  size_t numpages;            /* current pages allocated */
  size_t totpages;            /* total pages allocated */
  size_t maxpages;            /* maximum number of pages allocated */

  size_t pagesize;            /* bytes per sbmalloc page */
  size_t minsize;             /* minimum allocation in bytes handled by sbmalloc */
  size_t id;                  /* unique id for filenames */
  char fstem[FILENAME_MAX];   /* the file stem where the data is stored */

  struct sb_alloc * head;     /* singly linked-list of allocations */

  struct sigaction act;       /* for the SIGSEGV signal handler */
  struct sigaction oldact;    /* ... */

  int (*acct_charge_cb)(size_t);    /* function pointers for accounting */
  int (*acct_discharge_cb)(size_t); /* ... */

#ifdef USE_PTHREAD
  pthread_mutex_t init_lock;  /* mutex guarding initialization */
  pthread_mutex_t lock;       /* mutex guarding struct */
#endif
} sb_info = {
#ifdef USE_PTHREAD
  .init_lock = PTHREAD_MUTEX_INITIALIZER,
#endif
  .init = 0,
  .atexit_registered = 0,
  .fstem = {'/', 't', 'm', 'p', '/', '\0'}, /* default stem: "/tmp/" */
  .acct_charge_cb = NULL,
  .acct_discharge_cb = NULL
};


/****************************************************************************/
/* User specified options.                                                  */
/****************************************************************************/
static int sb_opts[SBOPT_NUM]=
{
  [SBOPT_ENABLED]     = 1,
  [SBOPT_NUMPAGES]    = 4,
  [SBOPT_MINPAGES]    = 4,
  [SBOPT_DEBUG]       = 0,
  [SBOPT_LAZYREAD]    = 0,
  [SBOPT_MULTITHREAD] = 1,
};


/****************************************************************************/
/* Debug strings.
*/ /****************************************************************************/ static char sb_dbg_str[SBDBG_NUM][100]= { [SBDBG_FATAL] = "error", [SBDBG_DIAG] = "diagnostic", [SBDBG_LEAK] = "memory", [SBDBG_INFO] = "info" }; /*--------------------------------------------------------------------------*/ #ifdef USE_CHECKSUM /****************************************************************************/ /*! Update a crc32 checksum. */ /****************************************************************************/ static unsigned int sb_internal_crc32_up(unsigned int crc, void const * const buf, size_t size) { static const unsigned int tbl[256] = { 0x00000000U,0x04C11DB7U,0x09823B6EU,0x0D4326D9U,0x130476DCU,0x17C56B6BU, 0x1A864DB2U,0x1E475005U,0x2608EDB8U,0x22C9F00FU,0x2F8AD6D6U,0x2B4BCB61U, 0x350C9B64U,0x31CD86D3U,0x3C8EA00AU,0x384FBDBDU,0x4C11DB70U,0x48D0C6C7U, 0x4593E01EU,0x4152FDA9U,0x5F15ADACU,0x5BD4B01BU,0x569796C2U,0x52568B75U, 0x6A1936C8U,0x6ED82B7FU,0x639B0DA6U,0x675A1011U,0x791D4014U,0x7DDC5DA3U, 0x709F7B7AU,0x745E66CDU,0x9823B6E0U,0x9CE2AB57U,0x91A18D8EU,0x95609039U, 0x8B27C03CU,0x8FE6DD8BU,0x82A5FB52U,0x8664E6E5U,0xBE2B5B58U,0xBAEA46EFU, 0xB7A96036U,0xB3687D81U,0xAD2F2D84U,0xA9EE3033U,0xA4AD16EAU,0xA06C0B5DU, 0xD4326D90U,0xD0F37027U,0xDDB056FEU,0xD9714B49U,0xC7361B4CU,0xC3F706FBU, 0xCEB42022U,0xCA753D95U,0xF23A8028U,0xF6FB9D9FU,0xFBB8BB46U,0xFF79A6F1U, 0xE13EF6F4U,0xE5FFEB43U,0xE8BCCD9AU,0xEC7DD02DU,0x34867077U,0x30476DC0U, 0x3D044B19U,0x39C556AEU,0x278206ABU,0x23431B1CU,0x2E003DC5U,0x2AC12072U, 0x128E9DCFU,0x164F8078U,0x1B0CA6A1U,0x1FCDBB16U,0x018AEB13U,0x054BF6A4U, 0x0808D07DU,0x0CC9CDCAU,0x7897AB07U,0x7C56B6B0U,0x71159069U,0x75D48DDEU, 0x6B93DDDBU,0x6F52C06CU,0x6211E6B5U,0x66D0FB02U,0x5E9F46BFU,0x5A5E5B08U, 0x571D7DD1U,0x53DC6066U,0x4D9B3063U,0x495A2DD4U,0x44190B0DU,0x40D816BAU, 0xACA5C697U,0xA864DB20U,0xA527FDF9U,0xA1E6E04EU,0xBFA1B04BU,0xBB60ADFCU, 0xB6238B25U,0xB2E29692U,0x8AAD2B2FU,0x8E6C3698U,0x832F1041U,0x87EE0DF6U, 
0x99A95DF3U,0x9D684044U,0x902B669DU,0x94EA7B2AU,0xE0B41DE7U,0xE4750050U, 0xE9362689U,0xEDF73B3EU,0xF3B06B3BU,0xF771768CU,0xFA325055U,0xFEF34DE2U, 0xC6BCF05FU,0xC27DEDE8U,0xCF3ECB31U,0xCBFFD686U,0xD5B88683U,0xD1799B34U, 0xDC3ABDEDU,0xD8FBA05AU,0x690CE0EEU,0x6DCDFD59U,0x608EDB80U,0x644FC637U, 0x7A089632U,0x7EC98B85U,0x738AAD5CU,0x774BB0EBU,0x4F040D56U,0x4BC510E1U, 0x46863638U,0x42472B8FU,0x5C007B8AU,0x58C1663DU,0x558240E4U,0x51435D53U, 0x251D3B9EU,0x21DC2629U,0x2C9F00F0U,0x285E1D47U,0x36194D42U,0x32D850F5U, 0x3F9B762CU,0x3B5A6B9BU,0x0315D626U,0x07D4CB91U,0x0A97ED48U,0x0E56F0FFU, 0x1011A0FAU,0x14D0BD4DU,0x19939B94U,0x1D528623U,0xF12F560EU,0xF5EE4BB9U, 0xF8AD6D60U,0xFC6C70D7U,0xE22B20D2U,0xE6EA3D65U,0xEBA91BBCU,0xEF68060BU, 0xD727BBB6U,0xD3E6A601U,0xDEA580D8U,0xDA649D6FU,0xC423CD6AU,0xC0E2D0DDU, 0xCDA1F604U,0xC960EBB3U,0xBD3E8D7EU,0xB9FF90C9U,0xB4BCB610U,0xB07DABA7U, 0xAE3AFBA2U,0xAAFBE615U,0xA7B8C0CCU,0xA379DD7BU,0x9B3660C6U,0x9FF77D71U, 0x92B45BA8U,0x9675461FU,0x8832161AU,0x8CF30BADU,0x81B02D74U,0x857130C3U, 0x5D8A9099U,0x594B8D2EU,0x5408ABF7U,0x50C9B640U,0x4E8EE645U,0x4A4FFBF2U, 0x470CDD2BU,0x43CDC09CU,0x7B827D21U,0x7F436096U,0x7200464FU,0x76C15BF8U, 0x68860BFDU,0x6C47164AU,0x61043093U,0x65C52D24U,0x119B4BE9U,0x155A565EU, 0x18197087U,0x1CD86D30U,0x029F3D35U,0x065E2082U,0x0B1D065BU,0x0FDC1BECU, 0x3793A651U,0x3352BBE6U,0x3E119D3FU,0x3AD08088U,0x2497D08DU,0x2056CD3AU, 0x2D15EBE3U,0x29D4F654U,0xC5A92679U,0xC1683BCEU,0xCC2B1D17U,0xC8EA00A0U, 0xD6AD50A5U,0xD26C4D12U,0xDF2F6BCBU,0xDBEE767CU,0xE3A1CBC1U,0xE760D676U, 0xEA23F0AFU,0xEEE2ED18U,0xF0A5BD1DU,0xF464A0AAU,0xF9278673U,0xFDE69BC4U, 0x89B8FD09U,0x8D79E0BEU,0x803AC667U,0x84FBDBD0U,0x9ABC8BD5U,0x9E7D9662U, 0x933EB0BBU,0x97FFAD0CU,0xAFB010B1U,0xAB710D06U,0xA6322BDFU,0xA2F33668U, 0xBCB4666DU,0xB8757BDAU,0xB5365D03U,0xB1F740B4U }; unsigned char const * p = (unsigned char *)buf; while (0 != size--) crc = tbl[*p++ ^ ((crc >> 24) & 0xff)] ^ (crc << 8); return crc; } 
/****************************************************************************/
/*! Compute crc32 checksum.
 *  Finalizing wrapper around sb_internal_crc32_up(): seeds with all-ones
 *  and XORs the result with all-ones, per the standard CRC-32 recipe.      */
/****************************************************************************/
static unsigned int sb_internal_crc32(void const * const buf, size_t size)
{
  return sb_internal_crc32_up(0xffffffffU, buf, size) ^ 0xffffffffU;
}
#endif


/*--------------------------------------------------------------------------*/


/****************************************************************************/
/*! Causes process to abnormally exit.
 *  When flag == 1, first logs file:line and strerror(errno) via the SBWARN
 *  macro (defined in sbconfig.h — NOTE(review): confirm), then raises
 *  SIGABRT and SIGKILL on itself.  exit() is a last resort and is normally
 *  unreachable after SIGKILL.
 *  NOTE(review): basename() needs <libgen.h>, which is not among the
 *  visible includes — confirm it comes in via sbconfig.h.                  */
/****************************************************************************/
static void sb_internal_abort(char const * const file, int const line,
                              int const flag)
{
  if (1 == flag)
/*#ifdef NDEBUG
    SBWARN(SBDBG_FATAL)("%s", strerror(errno));
  if (NULL == file || 0 == line) {}
#else*/
    SBWARN(SBDBG_FATAL)("%s:%d: %s", basename(file), line, strerror(errno));
//#endif

  kill(getpid(), SIGABRT);
  kill(getpid(), SIGKILL);
  exit(EXIT_FAILURE);
}


/****************************************************************************/
/*! Accounting functionality.
 *  Dispatches on acct_type (see enum sb_acct_type) to update the global
 *  counters in sb_info under sb_info.lock.  CHARGE/DISCHARGE additionally
 *  invoke the user-registered callbacks BEFORE taking the lock.            */
/****************************************************************************/
static void sb_internal_acct(int const acct_type, size_t const arg)
{
  int retval=2; /* sentinel: distinguishes "no callback ran" from 0/1 */

  /* the callback functions must be handled outside of the sb_info.lock
   * section to prevent deadlock.  (Original comment said "xm_info.lock" —
   * presumably a stale name; confirm.) */
  if (SBACCT_CHARGE == acct_type && NULL != sb_info.acct_charge_cb)
    retval = (*sb_info.acct_charge_cb)(arg);
  if (SBACCT_DISCHARGE == acct_type && NULL != sb_info.acct_discharge_cb)
    retval = (*sb_info.acct_discharge_cb)(arg);
  if (retval) {} /* surpress unused-value warning */

  //if (SBDBG_INFO > sb_opts[SBOPT_DEBUG])
  //  return;

  SB_GET_LOCK(&(sb_info.lock));
  switch (acct_type) {
  case SBACCT_READ:
    sb_info.numrd += arg;
    break;

  case SBACCT_WRITE:
    sb_info.numwr += arg;
    break;

  case SBACCT_RDFAULT:
    sb_info.numrd += arg;
    sb_info.numrf++;
    sb_info.numsf++;
    break;

  case SBACCT_WRFAULT:
    sb_info.numwr += arg;
    sb_info.numwf++;
    sb_info.numsf++;
    break;

  case SBACCT_ALLOC:
    sb_info.totpages += arg;
    sb_info.numpages += arg;
    break;

  case SBACCT_FREE:
    sb_info.numpages -= arg;
    break;

  case SBACCT_CHARGE:
    sb_info.curpages += arg;
    /*if (0 != arg && 0 == retval) {
      fprintf(stderr, "[%5d] here 1 %zu\n", (int)getpid(), arg);
      sb_abort(0);
    }*/
    //if (0 != arg) printf("[%5d] ___charge: %zu\n", (int)getpid(), arg);
    /* track high-water mark of resident pages */
    if (sb_info.curpages > sb_info.maxpages)
      sb_info.maxpages = sb_info.curpages;
    break;

  case SBACCT_DISCHARGE:
    /*if (0 != arg && 0 == retval) {
      fprintf(stderr, "[%5d] here 2 %zu\n", (int)getpid(), arg);
      sb_abort(0);
    }*/
    //if (0 != arg) printf("[%5d] ___discharge: %zu\n", (int)getpid(), arg);
    sb_info.curpages -= arg;
    break;
  }
  SB_LET_LOCK(&(sb_info.lock));
}


/****************************************************************************/
/*! Loads the supplied range of pages in sb_alloc and sets their
 *  protection mode.  If state is SBPAGE_SYNC, then this operation will not
 *  overwrite any dirty pages in the range.
*/ /****************************************************************************/ static size_t sb_internal_load_range(struct sb_alloc * const sb_alloc, size_t const ip_beg, size_t const npages, int const state) { #ifdef USE_CHECKSUM size_t i; unsigned int * pchksums; #endif int fd; size_t ip, psize, app_addr, tmp_addr, tsize, off, ip_end, numrd=0; size_t chunk, lip_beg, lip_end; ssize_t size, ipfirst; char * buf, * pflags; if (NULL == sb_alloc) return 0; if (npages > sb_alloc->npages) return 0; if (ip_beg > sb_alloc->npages-npages) return 0; psize = sb_info.pagesize; app_addr = sb_alloc->app_addr; pflags = sb_alloc->pflags; ip_end = ip_beg+npages; #ifdef USE_CHECKSUM pchksums = sb_alloc->pchksums; #endif if (SBPAGE_SYNC == state) { /* Shortcut by checking to see if all pages are already loaded */ if (sb_alloc->ld_pages == sb_alloc->npages) return 0; //SBFADVISE(fd, ip_beg*psize, (ip_end-ip_beg)*psize, // POSIX_FADV_WILLNEED|POSIX_FADV_SEQUENTIAL|POSIX_FADV_NOREUSE); //#if defined(USE_PTHREAD) && defined(USE_BULK) // /* mmap a page into a temporary address with write privileges. */ //# ifdef USE_CHECKSUM // SBMMAP(tmp_addr, (ip_end-ip_beg)*psize, PROT_READ|PROT_WRITE); //# else // SBMMAP(tmp_addr, (ip_end-ip_beg)*psize, PROT_WRITE); //# endif // SBMLOCK(tmp_addr, (ip_end-ip_beg)*psize); //#elif !defined(USE_PTHREAD) # ifdef USE_CHECKSUM SBMPROTECT(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, PROT_READ|PROT_WRITE); # else SBMPROTECT(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, PROT_WRITE); # endif tmp_addr = app_addr; SBMLOCK(tmp_addr+(ip_beg*psize), (ip_end-ip_beg)*psize); //#endif /* Load only those pages which are on disk and are not already synched * with the disk or dirty. 
*/ /*#pragma omp parallel default(none) \ if(sb_opts[SBOPT_MULTITHREAD] > 1 && \ ip_end-ip_beg > (size_t)sb_opts[SBOPT_MULTITHREAD]) \ num_threads(sb_opts[SBOPT_MULTITHREAD]) \ private(i,fd,ip,ipfirst,off,tsize,buf,size,chunk,lip_beg,lip_end) \ shared(ip_end,pflags,psize,app_addr,sb_info,numrd)*/ { /* open the file for reading, and create it if it does not exist */ if (-1 == (fd=open(sb_alloc->fname, O_RDONLY, S_IRUSR|S_IWUSR))) sb_abort(1); chunk = 1+(((ip_end-ip_beg)-1)/omp_get_num_threads()); lip_beg = ip_beg+omp_get_thread_num()*chunk; lip_end = lip_beg+chunk < ip_end ? lip_beg+chunk : ip_end; for (ipfirst=-1,ip=lip_beg; ip<=lip_end; ++ip) { if (ip != lip_end && !SBISSET(pflags[ip], SBPAGE_SYNC) && !SBISSET(pflags[ip], SBPAGE_DIRTY) && SBISSET(pflags[ip], SBPAGE_ONDISK)) { if (-1 == ipfirst) ipfirst = ip; } else if (-1 != ipfirst) { //#if defined(USE_PTHREAD) && !defined(USE_BULK) //# ifdef USE_CHECKSUM // SBMMAP(tmp_addr, (ip-ipfirst)*psize, PROT_READ|PROT_WRITE); //# else // SBMMAP(tmp_addr, (ip-ipfirst)*psize, PROT_WRITE); //# endif //#endif //#if defined(USE_PTHREAD) && !defined(USE_BULK) // off = 0; //#elif defined(USE_PTHREAD) // off = (ipfirst-ip_beg)*psize; //#else off = ipfirst*psize; //#endif buf = (char*)(tmp_addr+off); tsize = (ip-ipfirst)*psize; if (-1 == lseek(fd, ipfirst*psize, SEEK_SET)) sb_abort(1); do { if (-1 == (size=libc_read(fd, buf, tsize))) sb_abort(1); buf += size; tsize -= size; } while (tsize > 0); #ifdef USE_CHECKSUM buf = (char*)(tmp_addr+off); for (i=0; i<ip-ipfirst; ++i) { sb_assert(sb_internal_crc32(buf+i*psize, psize) == pchksums[ipfirst+i]); } #endif //#if defined(USE_PTHREAD) && !defined(USE_BULK) // /* remove write privileges from temporary pages and grant read-only // * privileges. */ // SBMPROTECT(tmp_addr+off, (ip-ipfirst)*psize, PROT_READ); // /* mremap temporary pages to the correct location in persistent // * memory region. 
*/ // SBMREMAP(tmp_addr+off, (ip-ipfirst)*psize, app_addr+(ipfirst*psize)); //#endif /*#pragma omp critical*/ numrd += (ip-ipfirst); ipfirst = -1; } } /* close file */ if (-1 == close(fd)) sb_abort(1); } //#if !defined(USE_PTHREAD) /* remove write privileges from pages and grant read-only privileges. */ SBMPROTECT(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, PROT_READ); //# ifdef USE_BULK // /* mremap temporary pages to the correct location in persistent memory // * region. */ // SBMREMAP(tmp_addr, (ip_end-ip_beg)*psize, app_addr+(ip_beg*psize)); //# endif //#endif for (ip=ip_beg; ip<ip_end; ++ip) { if (SBISSET(pflags[ip], SBPAGE_DUMP)) { /* - DUMP flag */ pflags[ip] &= ~SBPAGE_DUMP; } /* count unloaded pages */ if (!SBISSET(pflags[ip], SBPAGE_DIRTY) && !SBISSET(pflags[ip], SBPAGE_SYNC)) { sb_assert(sb_alloc->ld_pages < sb_alloc->npages); sb_alloc->ld_pages++; /* + SYNC flag */ pflags[ip] |= SBPAGE_SYNC; } //#if !defined(USE_PTHREAD) /* leave dirty pages dirty */ else if (SBISSET(pflags[ip], SBPAGE_DIRTY)) { SBMPROTECT(app_addr+(ip*psize), psize, PROT_READ|PROT_WRITE); } //#endif } numrd = SB_TO_SYS(numrd, psize); } else if (SBPAGE_DIRTY == state) { for (ip=ip_beg; ip<ip_end; ++ip) { /* count unloaded pages */ if (!SBISSET(pflags[ip], SBPAGE_SYNC) && !SBISSET(pflags[ip], SBPAGE_DIRTY)) { sb_assert(sb_alloc->ld_pages < sb_alloc->npages); sb_alloc->ld_pages++; } /* - SYNC/DUMP/ONDISK flag */ pflags[ip] &= ~(SBPAGE_SYNC|SBPAGE_DUMP|SBPAGE_ONDISK); /* + DIRTY flag */ pflags[ip] |= SBPAGE_DIRTY; } SBMPROTECT(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, PROT_READ|PROT_WRITE); SBMLOCK(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize); } return numrd; } /****************************************************************************/ /*! Synchronize file on disk with the supplied range of pages in sb_alloc * and set their protection mode. 
*/
/****************************************************************************/
/*! Writes every DIRTY page in [ip_beg, ip_beg+npages) back to the backing
 * file in contiguous runs, then clears the range's SYNC/DIRTY/DUMP flags,
 * unlocks and unmaps its physical memory (MADV_DONTNEED) and protects it
 * PROT_NONE so the next touch faults back into sb_internal_handler.
 * Returns the number of system pages written.  Callers in this file invoke
 * it with sb_alloc->lock held.
 * NOTE(review): like sb_internal_load_range, the omp_get_*() calls run
 * outside any parallel region, so this executes serially. */
/****************************************************************************/
static size_t sb_internal_sync_range(struct sb_alloc * const sb_alloc,
                                     size_t const ip_beg, size_t const npages)
{
#ifdef USE_CHECKSUM
  size_t i;
  unsigned int * pchksums;
#endif
  int fd;
  size_t ip, psize, app_addr, tsize, off, ip_end, num=0;
  size_t chunk, lip_beg, lip_end;
  ssize_t size, ipfirst;
  char * buf, * pflags;

  /* validate arguments */
  if (NULL == sb_alloc)
    return 0;
  if (npages > sb_alloc->npages)
    return 0;
  if (ip_beg > sb_alloc->npages-npages)
    return 0;

  psize    = sb_info.pagesize;
  app_addr = sb_alloc->app_addr;
  pflags   = sb_alloc->pflags;
  ip_end   = ip_beg+npages;
#ifdef USE_CHECKSUM
  pchksums = sb_alloc->pchksums;
#endif

  /* make the range readable so dirty data can be written out */
  SBMPROTECT(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, PROT_READ);

  /* go over the pages and write the ones that have changed.  perform the
   * writes in contiguous chunks of changed pages. */
  {
    /* open the backing file for writing (created at SB_malloc time) */
    if (-1 == (fd=open(sb_alloc->fname, O_WRONLY, S_IRUSR|S_IWUSR)))
      sb_abort(1);

    /* serial remnant of an OpenMP decomposition: whole range, one thread */
    chunk   = 1+(((ip_end-ip_beg)-1)/omp_get_num_threads());
    lip_beg = ip_beg+omp_get_thread_num()*chunk;
    lip_end = lip_beg+chunk < ip_end ? lip_beg+chunk : ip_end;

    /* scan for maximal runs [ipfirst, ip) of dirty pages; the extra
     * iteration at ip == lip_end flushes a run ending at the boundary */
    for (ipfirst=-1,ip=lip_beg; ip<=lip_end; ++ip) {
      if (ip != lip_end && SBISSET(pflags[ip], SBPAGE_DIRTY)) {
        pflags[ip] |= SBPAGE_ONDISK;

        if (-1 == ipfirst)
          ipfirst = ip;
      }
      else if (-1 != ipfirst) {
        off   = ipfirst*psize;
        tsize = (ip-ipfirst)*psize;
        buf   = (char *)(app_addr+off);

        /* write from [ipfirst...ip) */
        if (-1 == lseek(fd, off, SEEK_SET))
          sb_abort(1);

#ifdef USE_CHECKSUM
        /* record a fresh checksum for each page about to hit disk */
        for (i=0; i<ip-ipfirst; ++i) {
          pchksums[ipfirst+i] = sb_internal_crc32(buf+i*psize, psize);
        }
#endif

        /* write the data, retrying short writes until complete */
        do {
          if (-1 == (size=libc_write(fd, buf, tsize)))
            sb_abort(1);
          buf   += size;
          tsize -= size;
        } while (tsize > 0);

        num += (ip-ipfirst);
        ipfirst = -1;
      }
    }

    /* close file */
    if (-1 == close(fd))
      sb_abort(1);
  }

  for (ip=ip_beg; ip<ip_end; ++ip) {
    if (SBISSET(pflags[ip], SBPAGE_DUMP)) {
      /* - DUMP flag */
      pflags[ip] &= ~(SBPAGE_DUMP);
    }

    /* count loaded pages */
    if (SBISSET(pflags[ip], SBPAGE_SYNC) ||
        SBISSET(pflags[ip], SBPAGE_DIRTY))
    {
      /* - SYNC/DIRTY flag */
      pflags[ip] &= ~(SBPAGE_SYNC|SBPAGE_DIRTY);

      sb_assert(sb_alloc->ld_pages > 0);
      sb_alloc->ld_pages--;
      sb_assert(sb_alloc->ch_pages > 0);
      sb_alloc->ch_pages--;
    }
  }

  /* evict the range: unlock, drop resident pages, and fault-protect */
  SBMUNLOCK(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize);
  SBMADVISE(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, MADV_DONTNEED);
  SBMPROTECT(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, PROT_NONE);

  /* convert to system pages */
  return SB_TO_SYS(num, psize);
}

/****************************************************************************/
/*! Dump changes to a specified region of memory and treat it as if it was
 * newly allocated.
*/
/****************************************************************************/
/*! Discard pages [ip_beg, ip_beg+npages) without writing them to disk:
 * evict their physical memory, clear SYNC/DIRTY/ONDISK, and mark them DUMP
 * so they read back as "newly allocated".  Returns the number of system
 * pages that were loaded (SYNC or DIRTY) and are discharged by this call.
 * Callers in this file invoke it with sb_alloc->lock held. */
/****************************************************************************/
extern size_t sb_internal_dump_range(struct sb_alloc * const sb_alloc,
                                     size_t const ip_beg, size_t const npages)
{
  size_t ip, psize, app_addr, ip_end, num=0;
  char * pflags;

  /* validate arguments */
  if (NULL == sb_alloc)
    return 0;
  if (npages > sb_alloc->npages)
    return 0;
  if (ip_beg > sb_alloc->npages-npages)
    return 0;

  psize    = sb_info.pagesize;
  app_addr = sb_alloc->app_addr;
  pflags   = sb_alloc->pflags;
  ip_end   = ip_beg+npages;

  /* evict the range: unlock, drop resident pages, and fault-protect */
  SBMUNLOCK(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize);
  SBMADVISE(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, MADV_DONTNEED);
  SBMPROTECT(app_addr+(ip_beg*psize), (ip_end-ip_beg)*psize, PROT_NONE);

  for (ip=ip_beg; ip<ip_end; ++ip) {
    /* count loaded pages */
    if (SBISSET(pflags[ip], SBPAGE_SYNC) ||
        SBISSET(pflags[ip], SBPAGE_DIRTY))
    {
      sb_assert(sb_alloc->ld_pages > 0);
      sb_alloc->ld_pages--;
      sb_assert(sb_alloc->ch_pages > 0);
      sb_alloc->ch_pages--;

      /* BUGFIX: num was never incremented, so this function always
       * returned 0 and SB_dumpall() summed zeros.  Count each previously
       * loaded page that this dump discharges. */
      num++;
    }

    /* - SYNC/DIRTY/ONDISK flag */
    pflags[ip] &= ~(SBPAGE_SYNC|SBPAGE_DIRTY|SBPAGE_ONDISK);
    /* + DUMP flag */
    pflags[ip] |= SBPAGE_DUMP;
  }

  /* convert to system pages */
  return SB_TO_SYS(num, psize);
}

/****************************************************************************/
/*! Counts the number of pages of an allocation which are not in a specific
 * state.
*/
/****************************************************************************/
/*! Returns, in system pages, how many pages of [ip_beg, ip_beg+npages)
 * do NOT have any of the flag bits in `state` set. */
/****************************************************************************/
static size_t sb_internal_probe(struct sb_alloc * const sb_alloc,
                                size_t const ip_beg, size_t const npages,
                                size_t const state)
{
  size_t ip, psize, ip_end, num=0;
  char * pflags;

  /* validate arguments */
  if (NULL == sb_alloc)
    return 0;
  if (npages > sb_alloc->npages)
    return 0;
  if (ip_beg > sb_alloc->npages-npages)
    return 0;

  psize  = sb_info.pagesize;
  pflags = sb_alloc->pflags;
  ip_end = ip_beg+npages;

  /* count pages NOT in the given state (note the negation) */
  for (ip=ip_beg; ip<ip_end; ++ip)
    num += !(pflags[ip]&state);

  /* convert to system pages */
  return SB_TO_SYS(num, psize);
}

/****************************************************************************/
/*! Returns a pointer to an sb_alloc that contains the specified
 * address. */
/****************************************************************************/
static struct sb_alloc * sb_internal_find(size_t const addr)
{
  size_t len, app_addr;
  struct sb_alloc * sb_alloc;

  /* walk the global allocation list under sb_info.lock, peeking at each
   * allocation's extent under its own lock.
   * NOTE(review): both locks are released before returning, so the caller
   * receives an unlocked sb_alloc -- callers re-acquire sb_alloc->lock, but
   * nothing prevents the allocation from being freed in between; confirm
   * intended against the usage model. */
  SB_GET_LOCK(&(sb_info.lock));
  for (sb_alloc=sb_info.head; NULL!=sb_alloc; sb_alloc=sb_alloc->next) {
    SB_GET_LOCK(&(sb_alloc->lock));
    len      = sb_alloc->len;
    app_addr = sb_alloc->app_addr;
    if (addr >= app_addr && addr < app_addr+len) {
      SB_LET_LOCK(&(sb_alloc->lock));
      break;
    }
    SB_LET_LOCK(&(sb_alloc->lock));
  }
  SB_LET_LOCK(&(sb_info.lock));

  /* NULL when no allocation contains addr */
  return sb_alloc;
}

/****************************************************************************/
/*! The SIGSEGV handler.
*/
/****************************************************************************/
/*! SIGSEGV handler: services a fault on a PROT_NONE / PROT_READ sbmalloc
 * page.  A fault on an unloaded page triggers a read-fault (load the page,
 * or the whole allocation when lazy reading is off); a fault on an already
 * SYNC'd page is a write-fault (mark the page dirty/writable).  Also
 * charges the accounting system for any newly charged pages. */
/****************************************************************************/
static void sb_internal_handler(int const sig, siginfo_t * const si,
                                void * const ctx)
{
  size_t ip, rd_num=0, wr_num=0, ch_pages=0;
  size_t addr=(size_t)si->si_addr;
  struct sb_alloc * sb_alloc=NULL;

  if (SIGSEGV != sig) {
    SBWARN(SBDBG_DIAG)("received incorrect signal (%d)", sig);
    return;
  }

  /* find the sb_alloc; a fault outside any tracked region is fatal */
  if (NULL == (sb_alloc=sb_internal_find(addr))) {
    SBWARN(SBDBG_FATAL)("[%5d] received SIGSEGV on unhandled memory location "
      "(%p)", (int)getpid(), (void*)addr);
    sb_abort(0);
  }

  SB_GET_LOCK(&(sb_alloc->lock));

  /* index of the faulting sbmalloc page within the allocation */
  ip = (addr-sb_alloc->app_addr)/sb_info.pagesize;

  if (!(SBISSET(sb_alloc->pflags[ip], SBPAGE_SYNC))) {
    if (0 == sb_opts[SBOPT_LAZYREAD]) {
      /* charge any pages which aren't charged */
      ch_pages = sb_alloc->npages-sb_alloc->ch_pages;
    }
    else {
      /* if no pages have been charged for this allocation, then charge the
       * whole allocation */
      if (0 == sb_alloc->ch_pages) {
        sb_assert(0 == sb_alloc->ld_pages);
        ch_pages = sb_alloc->npages;
      }
      /* otherwise, this allocation has already been charged once completely;
       * now we must only charge for pages which have since been dumped */
      else if (SBISSET(sb_alloc->pflags[ip], SBPAGE_DUMP)) {
        sb_assert(sb_alloc->ch_pages < sb_alloc->npages);
        ch_pages = 1;
      }
    }
  }
  else {
    /* by the nature of this signal handler, this offending page is
     * necessarily charged. */
    ch_pages = 0;
  }
  sb_alloc->ch_pages += ch_pages;

  /* convert to system pages */
  ch_pages = SB_TO_SYS(ch_pages, sb_info.pagesize);

  /* charge for the pages to be loaded before actually loading them to ensure
   * there is room in memory.
   * NOTE(review): this callback runs while sb_alloc->lock is held (the
   * commented-out unlock/relock suggests that was once reconsidered). */
  //SB_LET_LOCK(&(sb_alloc->lock));
  sb_internal_acct(SBACCT_CHARGE, ch_pages);
  //SB_GET_LOCK(&(sb_alloc->lock));

  if (!(SBISSET(sb_alloc->pflags[ip], SBPAGE_SYNC))) {
    /* read fault: load one page (lazy) or the whole allocation (eager) */
    if (0 == sb_opts[SBOPT_LAZYREAD]) {
      rd_num = sb_internal_load_range(sb_alloc, 0, sb_alloc->npages,
        SBPAGE_SYNC);
    }
    else {
      rd_num = sb_internal_load_range(sb_alloc, ip, 1, SBPAGE_SYNC);
    }
  }
  else {
    /* write fault: mark the page dirty and writable */
    (void)sb_internal_load_range(sb_alloc, ip, 1, SBPAGE_DIRTY);
  }
  SB_LET_LOCK(&(sb_alloc->lock));

  sb_internal_acct(SBACCT_RDFAULT, rd_num);
  sb_internal_acct(SBACCT_WRFAULT, wr_num);

  if (NULL == ctx) {} /* suppress unused warning */
}

/****************************************************************************/
/*! Shuts down the sbmalloc subsystem. */
/****************************************************************************/
static void sb_internal_destroy(void)
{
  SB_GET_LOCK(&(sb_info.init_lock));

  /* idempotent: nothing to do when never initialized (or already torn down) */
  if (0 == sb_info.init)
    goto DONE;

  sb_info.init = 0;

  /* restore the SIGSEGV disposition saved by sb_internal_init */
  if (-1 == sigaction(SIGSEGV, &(sb_info.oldact), NULL))
    sb_abort(1);

  SB_FREE_LOCK(&(sb_info.lock));

  /* report lifetime read/write counters */
  fprintf(stderr, "numrd=%zu\n", sb_info.numrd);
  fprintf(stderr, "numwr=%zu\n", sb_info.numwr);

DONE:
  SB_LET_LOCK(&(sb_info.init_lock));
}

/****************************************************************************/
/*! Initializes the sbmalloc subsystem.
*/ /****************************************************************************/ static void sb_internal_init(void) { size_t npages, minpages; struct rlimit lim; SB_GET_LOCK(&(sb_info.init_lock)); if (1 == sb_info.init) goto DONE; npages = sb_opts[SBOPT_NUMPAGES]; minpages = sb_opts[SBOPT_MINPAGES]; sb_info.init = 1; sb_info.id = 0; sb_info.numsf = 0; sb_info.numrf = 0; sb_info.numwf = 0; sb_info.numrd = 0; sb_info.numwr = 0; sb_info.curpages = 0; sb_info.numpages = 0; sb_info.maxpages = 0; sb_info.pagesize = npages*sysconf(_SC_PAGESIZE); sb_info.minsize = minpages*sysconf(_SC_PAGESIZE); sb_info.head = NULL; /* setup the signal handler */ sb_info.act.sa_flags = SA_SIGINFO; sb_info.act.sa_sigaction = sb_internal_handler; if (-1 == sigemptyset(&(sb_info.act.sa_mask))) goto CLEANUP; if (-1 == sigaction(SIGSEGV, &(sb_info.act), &(sb_info.oldact))) goto CLEANUP; if (-1 == getrlimit(RLIMIT_MEMLOCK, &lim)) goto CLEANUP; lim.rlim_cur = lim.rlim_max; /*printf("[%5d] %zu %zu\n", (int)getpid(), lim.rlim_cur, lim.rlim_max);*/ if (-1 == setrlimit(RLIMIT_MEMLOCK, &lim)) goto CLEANUP; /* setup the sb_info mutex */ SB_INIT_LOCK(&(sb_info.lock)); DONE: SB_LET_LOCK(&(sb_info.init_lock)); return; CLEANUP: sb_abort(1); } /*--------------------------------------------------------------------------*/ /****************************************************************************/ /*! Set parameters for the sbmalloc subsystem. */ /****************************************************************************/ extern int SB_mallopt(int const param, int const value) { if (param >= SBOPT_NUM) { SBWARN(SBDBG_DIAG)("param too large"); return -1; } /*if (SBOPT_NUMPAGES == param && 0 != sb_opts[SBOPT_ENABLED]) { SBWARN(SBDBG_DIAG)("cannot change pagesize after sb has been enabled"); return -1; }*/ sb_opts[param] = value; return 0; } /****************************************************************************/ /*! Set parameters for the sbmalloc subsystem. 
*/ /****************************************************************************/ extern int SB_mallget(int const param) { if (param >= SBOPT_NUM) { SBWARN(SBDBG_DIAG)("param too large"); return -1; } return sb_opts[param]; } /****************************************************************************/ /* Return some memory statistics */ /****************************************************************************/ extern struct mallinfo SB_mallinfo(void) { struct mallinfo mi; mi.smblks = sb_info.numrf; /* number of read faults */ mi.ordblks = sb_info.numwf; /* number of write faults */ mi.hblks = sb_info.numsf; /* number of segmentation faults */ mi.usmblks = sb_info.numrd; /* number of pages read from disk */ mi.fsmblks = sb_info.numwr; /* number of pages wrote to disk */ mi.uordblks = sb_info.curpages; /* pages loaded at time of call */ mi.fordblks = sb_info.numpages; /* pages allocated at time of call */ mi.arena = sb_info.maxpages; /* maximum concurrent memory allocated */ mi.keepcost = sb_info.totpages; /* total number of allocated pages */ return mi; } /****************************************************************************/ /*! Set parameters for the sbmalloc subsystem. */ /****************************************************************************/ extern int SB_fstem(char const * const fstem) { SB_GET_LOCK(&(sb_info.lock)); strncpy(sb_info.fstem, fstem, FILENAME_MAX-1); sb_info.fstem[FILENAME_MAX-1] = '\0'; SB_LET_LOCK(&(sb_info.lock)); return 0; } /****************************************************************************/ /*! 
 * Set the charge/discharge callback functions for the sbmalloc accounting
 * system. */
/****************************************************************************/
extern int SB_acct(int (*acct_charge_cb)(size_t),
                   int (*acct_discharge_cb)(size_t))
{
  SB_GET_LOCK(&(sb_info.lock));
  sb_info.acct_charge_cb    = acct_charge_cb;
  sb_info.acct_discharge_cb = acct_discharge_cb;
  SB_LET_LOCK(&(sb_info.lock));

  return 0;
}

/****************************************************************************/
/*! Check if an allocation was created by the sbmalloc system. */
/****************************************************************************/
extern int SB_exists(void const * const addr)
{
  SB_INIT_CHECK

  return (NULL != sb_internal_find((size_t)addr));
}

/****************************************************************************/
/*! Synchronize anonymous mmap with disk.  If addr or addr+len falls within
 * a page, then that whole page will be synchronized.  Returns the number of
 * system pages discharged (unloaded) by the sync. */
/****************************************************************************/
extern size_t SB_sync(void const * const addr, size_t len)
{
  size_t psize, app_addr, npages, ipfirst, ipend, num, ld_pages;
  struct sb_alloc * sb_alloc;

  SB_INIT_CHECK

  if (0 == len)
    return 0;

  if (NULL == (sb_alloc=sb_internal_find((size_t)addr))) {
    SBWARN(SBDBG_DIAG)("attempt to synchronize an unhandled memory "
      "location (%p)", addr);
    return 0;
  }

  SB_GET_LOCK(&(sb_alloc->lock));

  /* shortcut: nothing loaded means nothing to write back */
  if (0 == sb_alloc->ld_pages) {
    SB_LET_LOCK(&(sb_alloc->lock));
    return 0;
  }

  /* clamp len to the end of the allocation */
  if (sb_alloc->len < len)
    len = sb_alloc->app_addr+sb_alloc->len-(size_t)addr;

  psize    = sb_info.pagesize;
  app_addr = sb_alloc->app_addr;
  npages   = sb_alloc->npages;

  /* need to make sure that all bytes are captured, thus ipfirst is a floor
   * operation and ipend is a ceil operation. */
  ipfirst = ((size_t)addr == app_addr) ? 0 : ((size_t)addr-app_addr)/psize;
  ipend   = ((size_t)addr+len == app_addr+sb_alloc->len)
          ? npages
          : 1+(((size_t)addr+len-app_addr-1)/psize);

  /* ld_pages ends up holding the drop in loaded-page count */
  ld_pages = sb_alloc->ld_pages;
  num      = sb_internal_sync_range(sb_alloc, ipfirst, ipend-ipfirst);
  ld_pages = ld_pages-sb_alloc->ld_pages;

  /* convert to system pages */
  ld_pages = SB_TO_SYS(ld_pages, psize);

  SB_LET_LOCK(&(sb_alloc->lock));

  sb_internal_acct(SBACCT_WRITE, num);
  sb_internal_acct(SBACCT_DISCHARGE, ld_pages);

  return ld_pages;
}

/****************************************************************************/
/*! Synchronize all anonymous mmaps with disk. */
/****************************************************************************/
extern size_t SB_syncall(void)
{
  size_t num=0, ld_pages=0;
  struct sb_alloc * sb_alloc;

  SB_INIT_CHECK

  SB_GET_LOCK(&(sb_info.lock));
  for (sb_alloc=sb_info.head; NULL!=sb_alloc; sb_alloc=sb_alloc->next) {
    SB_GET_LOCK(&(sb_alloc->lock));
    ld_pages += sb_alloc->ld_pages;
    num      += sb_internal_sync_range(sb_alloc, 0, sb_alloc->npages);
    /* a full sync must leave the allocation fully unloaded/discharged */
    sb_assert(0 == sb_alloc->ld_pages);
    sb_assert(0 == sb_alloc->ch_pages);
    SB_LET_LOCK(&(sb_alloc->lock));
  }
  SB_LET_LOCK(&(sb_info.lock));

  /* convert to system pages */
  ld_pages = SB_TO_SYS(ld_pages, sb_info.pagesize);

  sb_internal_acct(SBACCT_WRITE, num);
  sb_internal_acct(SBACCT_DISCHARGE, ld_pages);

  return ld_pages;
}

/****************************************************************************/
/*! Load anonymous mmap from disk.  If addr or addr+len falls within a page,
 * then that whole page will be loaded.
*/
/****************************************************************************/
/*! Load the page range covering [addr, addr+len) into the given state
 * (SBPAGE_SYNC or SBPAGE_DIRTY).  Returns the number of system pages that
 * became loaded as a result.  Silently returns 0 for addresses not managed
 * by sbmalloc. */
/****************************************************************************/
extern size_t SB_load(void const * const addr, size_t len, int const state)
{
  size_t psize, app_addr, npages, ipfirst, ipend;
  size_t num=0, _num1=0, _num2=0, ld_pages=0, _ld_pages1=0, _ld_pages2=0;
  struct sb_alloc * sb_alloc;

  SB_INIT_CHECK

  if (0 == len)
    return 0;

  if (NULL == (sb_alloc=sb_internal_find((size_t)addr))) {
    /* deliberately silent: loading an unmanaged address is a no-op */
    //SBWARN(SBDBG_DIAG)("attempt to load an unhandled memory location (%p)",
    //  addr);
    return 0;
  }

  SB_GET_LOCK(&(sb_alloc->lock));

  /* shortcut: already fully loaded in SYNC state */
  if (SBPAGE_SYNC == state && sb_alloc->npages == sb_alloc->ld_pages) {
    SB_LET_LOCK(&(sb_alloc->lock));
    return 0;
  }

  /* clamp len to the end of the allocation */
  if (sb_alloc->len < len)
    len = sb_alloc->app_addr+sb_alloc->len-(size_t)addr;

  psize    = sb_info.pagesize;
  app_addr = sb_alloc->app_addr;
  npages   = sb_alloc->npages;

  /* need to make sure that all bytes are captured, thus ipfirst is a floor
   * operation and ipend is a ceil operation. */
  ipfirst = ((size_t)addr == app_addr) ? 0 : ((size_t)addr-app_addr)/psize;
  ipend   = ((size_t)addr+len == app_addr+sb_alloc->len)
          ? npages
          : 1+(((size_t)addr+len-app_addr-1)/psize);

#if 0
  /* Special handling when pages are being set to dirty state. */
  if (SBPAGE_DIRTY == state) {
    sb_assert((void*)(sb_alloc->app_addr+ipfirst*psize) <= addr);
    sb_assert(addr <= (void*)(sb_alloc->app_addr+(ipfirst+1)*psize));
    /* If first page is shared, then it should first be sync'd to make sure
     * that any data in the shared part is loaded. */
    if ((void*)(sb_alloc->app_addr+ipfirst*psize) != addr) {
      _ld_pages1 = sb_alloc->ld_pages;
      //_num1 = sb_internal_load_range(sb_alloc, ipfirst, 1, SBPAGE_SYNC);
      _ld_pages1 = sb_alloc->ld_pages-_ld_pages1;
    }

    sb_assert((void*)(sb_alloc->app_addr+(ipend-1)*psize) <= addr);
    sb_assert(addr <= (void*)(sb_alloc->app_addr+ipend*psize));
    /* If last page is shared, then it should first be sync'd to make sure
     * that any data in the shared part is loaded. */
    if (sb_alloc->app_addr+ipend*psize != (size_t)addr+len) {
      _ld_pages2 = sb_alloc->ld_pages;
      //_num2 = sb_internal_load_range(sb_alloc, ipend-1, 1, SBPAGE_SYNC);
      _ld_pages2 = sb_alloc->ld_pages-_ld_pages2;
    }
  }
#endif

  if (ipfirst < ipend) {
    /* probe (in system pages) how many pages will newly become loaded,
     * charge the accounting system up front, then load */
    _ld_pages1 = sb_internal_probe(sb_alloc, ipfirst, ipend-ipfirst,
      SBPAGE_SYNC|SBPAGE_DIRTY);
    //SB_LET_LOCK(&(sb_alloc->lock));
    sb_internal_acct(SBACCT_CHARGE, _ld_pages1);
    //SB_GET_LOCK(&(sb_alloc->lock));

    ld_pages = sb_alloc->ld_pages;
    num      = sb_internal_load_range(sb_alloc, ipfirst, ipend-ipfirst, state);
    ld_pages = sb_alloc->ld_pages-ld_pages;

    /* the probe estimate must match what actually got loaded */
    sb_assert(_ld_pages1 == SB_TO_SYS(ld_pages, psize));
    _ld_pages1 = 0;
  }

  num      = num + _num1 + _num2;
  ld_pages = ld_pages + _ld_pages1 + _ld_pages2;

  /* charge */
  sb_alloc->ch_pages += ld_pages;

  /* convert to system pages */
  ld_pages = SB_TO_SYS(ld_pages, psize);

  SB_LET_LOCK(&(sb_alloc->lock));

  sb_internal_acct(SBACCT_READ, num);
  /* charge already performed before loading, see above */
  //sb_internal_acct(SBACCT_CHARGE, ld_pages);

  return ld_pages;
}

/****************************************************************************/
/*! Load all anonymous mmaps from disk.
*/
/****************************************************************************/
/*! Load every allocation completely into the given state.  Returns the
 * number of system pages that became loaded. */
/****************************************************************************/
extern size_t SB_loadall(int const state)
{
  size_t num=0, ld_pages=0;
  struct sb_alloc * sb_alloc;

  SB_INIT_CHECK

  SB_GET_LOCK(&(sb_info.lock));
  for (sb_alloc=sb_info.head; NULL!=sb_alloc; sb_alloc=sb_alloc->next) {
    SB_GET_LOCK(&(sb_alloc->lock));
    /* NOTE(review): `-=` looks suspect -- the assert on the next line
     * requires ch_pages == npages afterwards, which `-=` cannot produce in
     * general (charging the unloaded pages, i.e. `+=`, would); confirm the
     * intended accounting before changing it. */
    sb_alloc->ch_pages -= sb_alloc->npages-sb_alloc->ld_pages;
    sb_assert(sb_alloc->npages == sb_alloc->ch_pages);
    ld_pages += sb_alloc->npages-sb_alloc->ld_pages;
    num      += sb_internal_load_range(sb_alloc, 0, sb_alloc->npages, state);
    sb_assert(sb_alloc->npages == sb_alloc->ld_pages);
    SB_LET_LOCK(&(sb_alloc->lock));
  }
  SB_LET_LOCK(&(sb_info.lock));

  /* convert to system pages */
  ld_pages = SB_TO_SYS(ld_pages, sb_info.pagesize);

  sb_internal_acct(SBACCT_READ, num);
  sb_internal_acct(SBACCT_CHARGE, ld_pages);

  return ld_pages;
}

/****************************************************************************/
/*! Dump changes to a specified region of memory and treat it as if it was
 * newly allocated.  If addr or addr+len falls within a page, then only the
 * full pages that are in the range addr..addr+len will be discarded. */
/****************************************************************************/
extern size_t SB_dump(void const * const addr, size_t len)
{
  size_t psize, app_addr, npages, ipfirst, ipend, ld_pages=0;
  struct sb_alloc * sb_alloc;

  SB_INIT_CHECK

  if (0 == len)
    return 0;

  if (NULL == (sb_alloc=sb_internal_find((size_t)addr))) {
    /* deliberately silent: dumping an unmanaged address is a no-op */
    //SBWARN(SBDBG_DIAG)("attempt to dump an unhandled memory location (%p)",
    //  addr);
    return 0;
  }

  SB_GET_LOCK(&(sb_alloc->lock));

  /* shortcut: nothing loaded means nothing to discard */
  if (0 == sb_alloc->ld_pages) {
    SB_LET_LOCK(&(sb_alloc->lock));
    return 0;
  }

  /* clamp len to the end of the allocation */
  if (sb_alloc->len < len)
    len = sb_alloc->app_addr+sb_alloc->len-(size_t)addr;

  psize    = sb_info.pagesize;
  app_addr = sb_alloc->app_addr;
  npages   = sb_alloc->npages;

  /* can only dump pages fully within range, thus ipfirst is a ceil
   * operation and ipend is a floor operation. */
  ipfirst = ((size_t)addr == app_addr)
          ? 0
          : 1+(((size_t)addr-app_addr-1)/psize);
  ipend   = ((size_t)addr+len == app_addr+sb_alloc->len)
          ? npages
          : ((size_t)addr+len-app_addr)/psize;

  if (ipfirst < ipend) {
    /* ld_pages ends up holding the drop in loaded-page count */
    ld_pages = sb_alloc->ld_pages;
    (void)sb_internal_dump_range(sb_alloc, ipfirst, ipend-ipfirst);
    ld_pages = ld_pages-sb_alloc->ld_pages;

    /* convert to system pages */
    ld_pages = SB_TO_SYS(ld_pages, psize);
  }

  SB_LET_LOCK(&(sb_alloc->lock));

  sb_internal_acct(SBACCT_DISCHARGE, ld_pages);

  return ld_pages;
}

/****************************************************************************/
/*! Dump all anonymous mmaps to disk.  Returns the sum of the per-allocation
 * dump-range results. */
/****************************************************************************/
extern size_t SB_dumpall(void)
{
  size_t num=0;
  struct sb_alloc * sb_alloc;

  SB_INIT_CHECK

  SB_GET_LOCK(&(sb_info.lock));
  for (sb_alloc=sb_info.head; NULL!=sb_alloc; sb_alloc=sb_alloc->next) {
    SB_GET_LOCK(&(sb_alloc->lock));
    num += sb_internal_dump_range(sb_alloc, 0, sb_alloc->npages);
    sb_assert(0 == sb_alloc->ld_pages);
    SB_LET_LOCK(&(sb_alloc->lock));
  }
  SB_LET_LOCK(&(sb_info.lock));

  return num;
}

/****************************************************************************/
/*! Allocate memory via anonymous mmap.
*/ /****************************************************************************/ extern void * SB_malloc(size_t const len) { #ifdef USE_CHECKSUM unsigned int * pchksums=NULL; #endif int fd=-1; size_t npages, psize, meta_size, msize; size_t app_addr=(size_t)MAP_FAILED; char * fname=NULL, * pflags=NULL; struct sb_alloc * sb_alloc=NULL; SB_INIT_CHECK /* shortcut */ if (0 == len) { SBWARN(SBDBG_DIAG)("attempt to allocate 0 bytes"); return NULL; } /* get memory info */ psize = sb_info.pagesize; npages = (len+psize-1)/psize; /* compute allocation sizes */ meta_size = (sizeof(struct sb_alloc))+(npages+1)+(100+strlen(sb_info.fstem)); #ifdef USE_CHECKSUM meta_size += (sizeof(unsigned int)*npages); #endif meta_size = (1+((meta_size-1)/psize))*psize; msize = npages*psize+meta_size; /* allocate memory */ SBMMAP(app_addr, msize, PROT_NONE); /* read/write protect internal memory */ SBMPROTECT(app_addr+npages*psize, meta_size, PROT_READ|PROT_WRITE); SBMLOCK(app_addr+npages*psize, meta_size); /* allocate the allocation structure */ sb_alloc = (struct sb_alloc*)(app_addr+npages*psize); /* allocate the per-page flag vector */ pflags = (char*)((size_t)sb_alloc+sizeof(struct sb_alloc)); /* create the filename for storage purposes */ fname = (char*)((size_t)pflags+(npages+1)); #ifdef USE_CHECKSUM pchksums = (unsigned int*)((size_t)fname+100+strlen(sb_info.fstem)); #endif /* create and truncate the file to size */ if (0 > sprintf(fname, "%s%d-%p", sb_info.fstem, (int)getpid(), (void*)sb_alloc)) { SBWARN(SBDBG_DIAG)("%s", strerror(errno)); goto CLEANUP; } if (-1 == (fd=open(fname, O_RDWR|O_CREAT|O_EXCL, S_IRUSR|S_IWUSR))) { SBWARN(SBDBG_DIAG)("%s", strerror(errno)); goto CLEANUP; } if (-1 == ftruncate(fd, npages*psize)) { SBWARN(SBDBG_DIAG)("%s", strerror(errno)); goto CLEANUP; } if (-1 == close(fd)) { SBWARN(SBDBG_DIAG)("%s", strerror(errno)); goto CLEANUP; } /* fd = -1; */ /* Only need if a goto CLEANUP follows */ /* populate sb_alloc structure */ sb_alloc->msize = msize; 
sb_alloc->ld_pages = 0; sb_alloc->ch_pages = 0; sb_alloc->npages = npages; sb_alloc->len = len; sb_alloc->fname = fname; sb_alloc->app_addr = app_addr; sb_alloc->pflags = pflags; #ifdef USE_CHECKSUM sb_alloc->pchksums = pchksums; #endif /* initialize sb_alloc lock */ SB_INIT_LOCK(&(sb_alloc->lock)); /* add to linked-list */ SB_GET_LOCK(&(sb_info.lock)); sb_alloc->next = sb_info.head; sb_info.head = sb_alloc; SB_LET_LOCK(&(sb_info.lock)); /* accounting */ sb_internal_acct(SBACCT_ALLOC, SB_TO_SYS(npages, psize)); return (void *)sb_alloc->app_addr; CLEANUP: if (NULL != sb_alloc) free(sb_alloc); if (NULL != pflags) free(pflags); if (MAP_FAILED != (void*)app_addr) SBMUNMAP(app_addr, msize); if (-1 != fd) (void)close(fd); return NULL; } /****************************************************************************/ /*! Frees the memory associated with an anonymous mmap. */ /****************************************************************************/ extern void SB_free(void * const addr) { struct sb_alloc * sb_alloc=NULL, * psb_alloc=NULL; SB_INIT_CHECK SB_GET_LOCK(&(sb_info.lock)); for (sb_alloc=sb_info.head; NULL!=sb_alloc; sb_alloc=sb_alloc->next) { SB_GET_LOCK(&(sb_alloc->lock)); if (sb_alloc->app_addr == (size_t)addr) { SB_LET_LOCK(&(sb_alloc->lock)); break; } SB_LET_LOCK(&(sb_alloc->lock)); psb_alloc = sb_alloc; } if (NULL == sb_alloc) { SBWARN(SBDBG_DIAG)("attempt to free an unhandled memory location (%p)", addr); sb_abort(0); } /* update the link-list */ if (NULL == psb_alloc) sb_info.head = sb_alloc->next; else psb_alloc->next = sb_alloc->next; SB_LET_LOCK(&(sb_info.lock)); /* accounting */ sb_internal_acct(SBACCT_DISCHARGE, SB_TO_SYS(sb_alloc->ch_pages, sb_info.pagesize)); sb_internal_acct(SBACCT_FREE, SB_TO_SYS(sb_alloc->npages, sb_info.pagesize)); /* free resources */ SB_FREE_LOCK(&(sb_alloc->lock)); if (-1 == unlink(sb_alloc->fname)) sb_abort(1); SBMUNLOCK(sb_alloc->app_addr, sb_alloc->msize); SBMUNMAP(sb_alloc->app_addr, sb_alloc->msize); } 
/****************************************************************************/
/*! Initializes the sbmalloc subsystem (thin wrapper over
 * sb_internal_init). */
/****************************************************************************/
extern void SB_init(void)
{
  sb_internal_init();
}

/****************************************************************************/
/*! Shuts down the sbmalloc subsystem (thin wrapper over
 * sb_internal_destroy). */
/****************************************************************************/
extern void SB_finalize(void)
{
  sb_internal_destroy();
}

/****************************************************************************/
/*! Report swap usage by parsing /proc/swaps.  Returns 0 on success, -1 on
 * failure. */
/****************************************************************************/
extern int SB_swap_usage(int const tag)
{
  int fd=-1;
  size_t size, used;
  ssize_t ret, nread, retval=-1;
  char * tok;
  char buf[16384], file[FILENAME_MAX];

  if (-1 == (fd=open("/proc/swaps", O_RDONLY)))
    goto CLEANUP;
  /* BUGFIX: the buffer handed to strtok was never NUL-terminated -- a full
   * 16384-byte read left strtok scanning past the end of buf.  Read at most
   * sizeof(buf)-1 bytes and terminate explicitly. */
  if (-1 == (nread=libc_read(fd, buf, sizeof(buf)-1)))
    goto CLEANUP;
  buf[nread] = '\0';

  /* skip header line */
  tok = strtok(buf, "\n");

  /* loop through swap lines */
  tok = strtok(NULL, "\n");
  while (NULL != tok) {
    /* NOTE(review): the "%s" conversion into file[] is unbounded; a swap
     * path longer than FILENAME_MAX-1 would overflow it. */
    if (3 != (ret=sscanf(tok, "%s %*s %zu %zu", file, &size, &used)))
      goto CLEANUP;

    if (0 > printf("[%5d:%d] swap usage on %s: %zu / %zu\n", (int)getpid(),
        tag, file, used, size))
    {
      goto CLEANUP;
    }

    tok = strtok(NULL, "\n");
  }

  retval = 0;

CLEANUP:
  if (-1 == retval)
    printf("swap usage failed\n");
  if (-1 != fd)
    close(fd);

  return retval;
}
/* ======================== pfmg3_setup_rap.c ======================== */
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"
#include "pfmg.h"

/*--------------------------------------------------------------------------
 * Macro to "change coordinates".  This routine is written as though
 * coarsening is being done in the z-direction.  This macro is used to
 * allow for coarsening to be done in the x- and y-directions also.
 * Note: cdir is advanced three times, i.e. it is left unchanged mod 3.
 *--------------------------------------------------------------------------*/

/* BUGFIX: wrapped in do { } while (0) -- the previous bare multi-statement
 * expansion would only execute its first statement under an unbraced
 * if/else, silently corrupting the index mapping at any such call site. */
#define MapIndex(in_index, cdir, out_index)                         \
   do {                                                             \
      hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 2);    \
      cdir = (cdir + 1) % 3;                                        \
      hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0);    \
      cdir = (cdir + 1) % 3;                                        \
      hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1);    \
      cdir = (cdir + 1) % 3;                                        \
   } while (0)

/*--------------------------------------------------------------------------
 * Sets up new coarse grid operator structure.
 *--------------------------------------------------------------------------*/

hypre_StructMatrix *
hypre_PFMG3CreateRAPOp( hypre_StructMatrix *R,
                        hypre_StructMatrix *A,
                        hypre_StructMatrix *P,
                        hypre_StructGrid   *coarse_grid,
                        HYPRE_Int           cdir        )
{
   hypre_StructMatrix    *RAP;
   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};

   hypre_StructStencil   *A_stencil;
   HYPRE_Int              A_stencil_size;

   hypre_Index            index_temp;
   HYPRE_Int              k, j, i;
   HYPRE_Int              stencil_rank;

   RAP_stencil_dim = 3;

   A_stencil = hypre_StructMatrixStencil(A);
   A_stencil_size = hypre_StructStencilSize(A_stencil);

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/

   stencil_rank = 0;

   /*-----------------------------------------------------------------------
    * non-symmetric case
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * 7-point fine grid stencil produces 19 point RAP
    *
    * Store all 27 elements except for the corners.
    *
    * For symmetric A, only store the lower triangular part, where
    * lower triangular means the lower triangular part on the matrix
    * in the standard lexicographic ordering.
    *-----------------------------------------------------------------------*/
   if( A_stencil_size == 7)
   {
      RAP_stencil_size = 19;
      if (hypre_StructMatrixSymmetric(A))
      {
         /* store only the lower triangle (incl. diagonal): (19+1)/2 = 10 */
         RAP_stencil_size = (RAP_stencil_size + 1) / 2;
      }
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (k = -1; k < 2; k++)
      {
         for (j = -1; j < 2; j++)
         {
            for (i = -1; i < 2; i++)
            {
               /* i*j*k == 0 excludes the 8 corner offsets; ranking in
                * lexicographic order picks up the lower triangle first */
               if ((i*j*k == 0) && (stencil_rank < RAP_stencil_size))
               {
                  hypre_SetIndex3(index_temp,i,j,k);
                  MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
                  stencil_rank++;
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * 19 or 27 point fine grid stencil produces 27 point RAP
    *
    * Store all 27 elements
    *
    * For symmetric A, only store the lower triangular part, where
    * lower triangular means the lower triangular part on the matrix
    * in the standard lexicographic ordering.
    *-----------------------------------------------------------------------*/
   else
   {
      RAP_stencil_size = 27;
      if (hypre_StructMatrixSymmetric(A))
      {
         /* store only the lower triangle (incl. diagonal): (27+1)/2 = 14 */
         RAP_stencil_size = (RAP_stencil_size + 1) / 2;
      }
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (k = -1; k < 2; k++)
      {
         for (j = -1; j < 2; j++)
         {
            for (i = -1; i < 2; i++)
            {
               if (stencil_rank < RAP_stencil_size)
               {
                  hypre_SetIndex3(index_temp,i,j,k);
                  MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
                  stencil_rank++;
               }
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim,
                                           RAP_stencil_size,
                                           RAP_stencil_shape);

   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);

   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}

/*--------------------------------------------------------------------------
 * Routines to build RAP. These routines are fairly general
 *  1) No assumptions about symmetry of A
 *  2) No assumption that R = transpose(P)
 *  3) 7, 19 or 27-point fine grid A
 *
 * I am, however, assuming that the c-to-c interpolation is the identity.
 *
 * I've written two routines - hypre_PFMG3BuildRAPSym to build the lower
 * triangular part of RAP (including the diagonal) and
 * hypre_PFMG3BuildRAPNoSym to build the upper triangular part of RAP
 * (excluding the diagonal). So using symmetric storage, only the first
 * routine would be called. With full storage both would need to be called.
 *
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PFMG3BuildRAPSym( hypre_StructMatrix *A,
                        hypre_StructMatrix *P,
                        hypre_StructMatrix *R,
                        HYPRE_Int           cdir,
                        hypre_Index         cindex,
                        hypre_Index         cstride,
                        hypre_StructMatrix *RAP     )
{
   hypre_StructStencil  *fine_stencil;
   HYPRE_Int             fine_stencil_size;

   hypre_StructGrid     *fgrid;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   HYPRE_Int            *cgrid_ids;

   HYPRE_Int             fi, ci;
   HYPRE_Int             constant_coefficient;
   HYPRE_Int             constant_coefficient_A;

   fine_stencil = hypre_StructMatrixStencil(A);
   fine_stencil_size = hypre_StructStencilSize(fine_stencil);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   /* only cc==0 (variable) and cc==1 (constant) are supported here, and
    * R and P must agree with RAP */
   constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
   constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);
   hypre_assert( constant_coefficient==0 || constant_coefficient==1 );
   hypre_assert( hypre_StructMatrixConstantCoefficient(R) ==
                 constant_coefficient );
   hypre_assert( hypre_StructMatrixConstantCoefficient(P) ==
constant_coefficient ); if (constant_coefficient==1 ) { hypre_assert( constant_coefficient_A==1 ); } else { hypre_assert( constant_coefficient_A==0 || constant_coefficient_A==2 ); } fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } /*-------------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ switch (fine_stencil_size) { /*-------------------------------------------------------------- * Loop for symmetric 7-point fine grid operator; produces a * symmetric 19-point coarse grid operator. We calculate only the * lower triangular stencil entries: (below-south, below-west, * below-center, below-east, below-north, center-south, * center-west, and center-center). *--------------------------------------------------------------*/ case 7: if ( constant_coefficient==1 ) { hypre_PFMG3BuildRAPSym_onebox_FSS07_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG3BuildRAPSym_onebox_FSS07_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; /*-------------------------------------------------------------- * Loop for symmetric 19-point fine grid operator; produces a * symmetric 27-point coarse grid operator. We calculate only the * lower triangular stencil entries: (below-southwest, below-south, * below-southeast, below-west, below-center, below-east, * below-northwest, below-north, below-northeast, center-southwest, * center-south, center-southeast, center-west, and center-center). 
*--------------------------------------------------------------*/ case 19: if ( constant_coefficient==1 ) { hypre_PFMG3BuildRAPSym_onebox_FSS19_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG3BuildRAPSym_onebox_FSS19_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; /*-------------------------------------------------------------- * Loop for symmetric 27-point fine grid operator; produces a * symmetric 27-point coarse grid operator. We calculate only the * lower triangular stencil entries: (below-southwest, below-south, * below-southeast, below-west, below-center, below-east, * below-northwest, below-north, below-northeast, center-southwest, * center-south, center-southeast, center-west, and center-center). *--------------------------------------------------------------*/ default: if ( constant_coefficient==1 ) { hypre_PFMG3BuildRAPSym_onebox_FSS27_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG3BuildRAPSym_onebox_FSS27_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; } /* end switch statement */ } /* end ForBoxI */ return hypre_error_flag; } /* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size (7) and one value of constant_coefficient (0). Within this function there is a test on constant_coefficient_A as well. 
*/
/* One-box kernel: lower-triangular RAP entries for a 7-point fine operator
 * with variable RAP coefficients (constant_coefficient == 0).  Branches
 * internally on constant_coefficient_A: 0 = fully variable A, otherwise
 * mode 2 (constant off-diagonal, variable diagonal), where the off-diagonal
 * A values are hoisted out of the loop as scalars. */
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS07_CC0(
   HYPRE_Int           ci,
   HYPRE_Int           fi,
   hypre_StructMatrix *A,
   hypre_StructMatrix *P,
   hypre_StructMatrix *R,
   HYPRE_Int           cdir,
   hypre_Index         cindex,
   hypre_Index         cstride,
   hypre_StructMatrix *RAP )
{
   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           stridec;
   hypre_Index           fstart;
   hypre_IndexRef        stridef;
   hypre_Index           loop_size;

   HYPRE_Int             constant_coefficient_A;

   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *R_dbox;
   hypre_Box            *RAP_dbox;

   HYPRE_Real           *pa, *pb;
   HYPRE_Real           *ra, *rb;

   HYPRE_Real           *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   HYPRE_Real           *a_ac;
   HYPRE_Real           *a_bc;
   /* scalar copies of constant off-diagonal A entries (mode 2 branch);
    * suffixes m1/p1 denote the planes below/above the current one */
   HYPRE_Real            a_cs_offd, a_cs_offdm1, a_cs_offdp1;
   HYPRE_Real            a_cn_offdm1;
   HYPRE_Real            a_cw_offd, a_cw_offdm1, a_cw_offdp1;
   HYPRE_Real            a_ce_offdm1;
   HYPRE_Real            a_ac_offd, a_ac_offdm1;
   HYPRE_Real            a_bc_offd, a_bc_offdm1, a_bc_offdp1;

   HYPRE_Real           *rap_cc, *rap_cw, *rap_cs;
   HYPRE_Real           *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
   HYPRE_Real           *rap_csw, *rap_cse;

   HYPRE_Int             iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             zOffsetA;
   HYPRE_Int             zOffsetA_diag;
   HYPRE_Int             zOffsetA_offd;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;
   HYPRE_Int             zOffsetP;

   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);

   cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

   cstart = hypre_BoxIMin(cgrid_box);
   hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

   A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
   P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
   R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
   RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

   /*-----------------------------------------------------------------
    * Extract pointers for interpolation operator:
    * pa is pointer for weight for f-point above c-point
    * pb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   /* shift pb so that pb[iP] addresses the entry one plane back */
   pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
      hypre_BoxOffsetDistance(P_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for restriction operator:
    * ra is pointer for weight for f-point above c-point
    * rb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   /* shift rb the same way as pb */
   rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) -
      hypre_BoxOffsetDistance(R_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 7-point fine grid operator:
    *
    * a_cc is pointer for center coefficient
    * a_cw is pointer for west coefficient in same plane
    * a_ce is pointer for east coefficient in same plane
    * a_cs is pointer for south coefficient in same plane
    * a_cn is pointer for north coefficient in same plane
    * a_ac is pointer for center coefficient in plane above
    * a_bc is pointer for center coefficient in plane below
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 19-point coarse grid operator:
    *
    * We build only the lower triangular part (plus diagonal).
    *
    * rap_cc is pointer for center coefficient (etc.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,-1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Define offsets for fine grid stencil and interpolation
    *
    * In the BoxLoop below I assume iA and iP refer to data associated
    * with the point which we are building the stencil for. The below
    * Offsets are used in referring to data associated with other points.
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);

   zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
   if ( constant_coefficient_A == 0 )
   {
      zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
   }
   else
   {
      /* mode 2: variable diagonal still needs a real offset; constant
       * off-diagonal entries use offset 0 (single stored value) */
      zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
      zOffsetA_offd = 0;
   }

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

   /*--------------------------------------------------------------
    * Symmetric 7-point fine grid operator; produces a symmetric
    * 19-point coarse grid operator. We calculate only the lower
    * triangular stencil entries: (below-south, below-west,
    * below-center, below-east, below-north, center-south,
    * center-west, and center-center).
    *--------------------------------------------------------------*/

   hypre_BoxGetSize(cgrid_box, loop_size);

   if ( constant_coefficient_A == 0 )
   {
      hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         iAm1 = iA - zOffsetA;
         iAp1 = iA + zOffsetA;

         iP1 = iP - zOffsetP - yOffsetP;
         rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1];

         iP1 = iP - zOffsetP - xOffsetP;
         rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];

         iP1 = iP - zOffsetP;
         rap_bc[iAc] =          a_bc[iA]   * pa[iP1]
                       + rb[iR] * a_cc[iAm1] * pa[iP1]
                       + rb[iR] * a_bc[iAm1];

         iP1 = iP - zOffsetP + xOffsetP;
         rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];

         iP1 = iP - zOffsetP + yOffsetP;
         rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1];

         iP1 = iP - yOffsetP;
         rap_cs[iAc] =          a_cs[iA]
                       + rb[iR] * a_cs[iAm1] * pb[iP1]
                       + ra[iR] * a_cs[iAp1] * pa[iP1];

         iP1 = iP - xOffsetP;
         rap_cw[iAc] =          a_cw[iA]
                       + rb[iR] * a_cw[iAm1] * pb[iP1]
                       + ra[iR] * a_cw[iAp1] * pa[iP1];

         /* 7-point A has no same-plane diagonal couplings */
         rap_csw[iAc] = 0.0;
         rap_cse[iAc] = 0.0;

         rap_cc[iAc] =          a_cc[iA]
                       + rb[iR] * a_cc[iAm1] * pb[iP]
                       + ra[iR] * a_cc[iAp1] * pa[iP]
                       + rb[iR] * a_ac[iAm1]
                       + ra[iR] * a_bc[iAp1]
                       +          a_bc[iA]   * pb[iP]
                       +          a_ac[iA]   * pa[iP];
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }
   else
   {
      /* constant_coefficient_A == 2: read each constant off-diagonal
       * entry once, outside the loop */
      iA_offd = 0;
      iA_offdm1 = iA_offd - zOffsetA_offd;
      iA_offdp1 = iA_offd + zOffsetA_offd;
      a_cs_offd = a_cs[iA_offd];
      a_cs_offdm1 = a_cs[iA_offdm1];
      a_cs_offdp1 = a_cs[iA_offdp1];
      a_cw_offd = a_cw[iA_offd];
      a_cw_offdm1 = a_cw[iA_offdm1];
      a_cw_offdp1 = a_cw[iA_offdp1];
      a_ce_offdm1 = a_ce[iA_offdm1];
      a_cn_offdm1 = a_cn[iA_offdm1];
      a_bc_offd = a_bc[iA_offd];
      a_bc_offdm1 = a_bc[iA_offdm1];
      a_bc_offdp1 = a_bc[iA_offdp1];
      a_ac_offd = a_ac[iA_offd];
      a_ac_offdm1 = a_ac[iA_offdm1];

      hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         /* only the variable diagonal a_cc is indexed through the box */
         iAm1 = iA - zOffsetA_diag;
         iAp1 = iA + zOffsetA_diag;

         iP1 = iP - zOffsetP - yOffsetP;
         rap_bs[iAc] = rb[iR] * a_cs_offdm1 * pa[iP1];

         iP1 = iP - zOffsetP - xOffsetP;
         rap_bw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1];

         iP1 = iP - zOffsetP;
         rap_bc[iAc] =          a_bc_offd   * pa[iP1]
                       + rb[iR] * a_cc[iAm1] * pa[iP1]
                       + rb[iR] * a_bc_offdm1;

         iP1 = iP - zOffsetP + xOffsetP;
         rap_be[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1];

         iP1 = iP - zOffsetP + yOffsetP;
         rap_bn[iAc] = rb[iR] * a_cn_offdm1 * pa[iP1];

         iP1 = iP - yOffsetP;
         rap_cs[iAc] =          a_cs_offd
                       + rb[iR] * a_cs_offdm1 * pb[iP1]
                       + ra[iR] * a_cs_offdp1 * pa[iP1];

         iP1 = iP - xOffsetP;
         rap_cw[iAc] =          a_cw_offd
                       + rb[iR] * a_cw_offdm1 * pb[iP1]
                       + ra[iR] * a_cw_offdp1 * pa[iP1];

         /* 7-point A has no same-plane diagonal couplings */
         rap_csw[iAc] = 0.0;
         rap_cse[iAc] = 0.0;

         rap_cc[iAc] =          a_cc[iA]
                       + rb[iR] * a_cc[iAm1] * pb[iP]
                       + ra[iR] * a_cc[iAp1] * pa[iP]
                       + rb[iR] * a_ac_offdm1
                       + ra[iR] * a_bc_offdp1
                       +          a_bc_offd * pb[iP]
                       +          a_ac_offd * pa[iP];
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }

   /* }*/ /* end ForBoxI */

   return hypre_error_flag;
}

/* core part of hypre_PFMG3BuildRAPSym, for one box, one value of
   fine_stencil_size (7) and one value of constant_coefficient (1).
*/
/* One-box kernel: lower-triangular RAP entries for a 7-point fine operator
 * with fully constant coefficients (constant_coefficient == 1).  Every
 * stencil entry is a single stored value, so all offsets collapse to 0
 * and the whole "loop" reduces to one evaluation at index 0. */
HYPRE_Int
hypre_PFMG3BuildRAPSym_onebox_FSS07_CC1(
   HYPRE_Int           ci,
   HYPRE_Int           fi,
   hypre_StructMatrix *A,
   hypre_StructMatrix *P,
   hypre_StructMatrix *R,
   HYPRE_Int           cdir,
   hypre_Index         cindex,
   hypre_Index         cstride,
   hypre_StructMatrix *RAP )
{
   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   hypre_Box            *cgrid_box;
   hypre_IndexRef        cstart;
   hypre_Index           fstart;

   HYPRE_Real           *pa, *pb;
   HYPRE_Real           *ra, *rb;

   HYPRE_Real           *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   HYPRE_Real           *a_ac;
   HYPRE_Real           *a_bc;

   HYPRE_Real           *rap_cc, *rap_cw, *rap_cs;
   HYPRE_Real           *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn;
   HYPRE_Real           *rap_csw, *rap_cse;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iP1;
   HYPRE_Int             iR;

   HYPRE_Int             zOffsetA;
   HYPRE_Int             xOffsetP;
   HYPRE_Int             yOffsetP;
   HYPRE_Int             zOffsetP;

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

   cstart = hypre_BoxIMin(cgrid_box);
   hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

   /*-----------------------------------------------------------------
    * Extract pointers for interpolation operator:
    * pa is pointer for weight for f-point above c-point
    * pb is pointer for weight for f-point below c-point
    * (no box-offset shift here: constant coefficients are indexed at 0)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for restriction operator:
    * ra is pointer for weight for f-point above c-point
    * rb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 7-point fine grid operator:
    *
    * a_cc is pointer for center coefficient
    * a_cw is pointer for west coefficient in same plane
    * a_ce is pointer for east coefficient in same plane
    * a_cs is pointer for south coefficient in same plane
    * a_cn is pointer for north coefficient in same plane
    * a_ac is pointer for center coefficient in plane above
    * a_bc is pointer for center coefficient in plane below
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 19-point coarse grid operator:
    *
    * We build only the lower triangular part (plus diagonal).
    *
    * rap_cc is pointer for center coefficient (etc.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,0,-1);
   MapIndex(index_temp, cdir, index);
   rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,-1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,-1);
   MapIndex(index_temp, cdir, index);
   rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,-1,0);
   MapIndex(index_temp, cdir, index);
   rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Define offsets for fine grid stencil and interpolation
    *
    * With constant coefficients every operator has a single stored
    * value per stencil entry, so all offsets are zero.
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);

   zOffsetA = 0;
   zOffsetP = 0;

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   yOffsetP = 0;

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   xOffsetP = 0;

   /*--------------------------------------------------------------
    * Symmetric 7-point fine grid operator; produces a symmetric
    * 19-point coarse grid operator. We calculate only the lower
    * triangular stencil entries: (below-south, below-west,
    * below-center, below-east, below-north, center-south,
    * center-west, and center-center).  Single evaluation at index 0
    * replaces the BoxLoop of the variable-coefficient kernels.
    *--------------------------------------------------------------*/

   iP = 0;
   iR = 0;
   iA = 0;
   iAc = 0;

   iAm1 = iA - zOffsetA;
   iAp1 = iA + zOffsetA;

   iP1 = iP - zOffsetP - yOffsetP;
   rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1];

   iP1 = iP - zOffsetP - xOffsetP;
   rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1];

   iP1 = iP - zOffsetP;
   rap_bc[iAc] =          a_bc[iA]   * pa[iP1]
                 + rb[iR] * a_cc[iAm1] * pa[iP1]
                 + rb[iR] * a_bc[iAm1];

   iP1 = iP - zOffsetP + xOffsetP;
   rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1];

   iP1 = iP - zOffsetP + yOffsetP;
   rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1];

   iP1 = iP - yOffsetP;
   rap_cs[iAc] =          a_cs[iA]
                 + rb[iR] * a_cs[iAm1] * pb[iP1]
                 + ra[iR] * a_cs[iAp1] * pa[iP1];

   iP1 = iP - xOffsetP;
   rap_cw[iAc] =          a_cw[iA]
                 + rb[iR] * a_cw[iAm1] * pb[iP1]
                 + ra[iR] * a_cw[iAp1] * pa[iP1];

   /* 7-point A has no same-plane diagonal couplings */
   rap_csw[iAc] = 0.0;
   rap_cse[iAc] = 0.0;

   rap_cc[iAc] =          a_cc[iA]
                 + rb[iR] * a_cc[iAm1] * pb[iP]
                 + ra[iR] * a_cc[iAp1] * pa[iP]
                 + rb[iR] * a_ac[iAm1]
                 + ra[iR] * a_bc[iAp1]
                 +          a_bc[iA]   * pb[iP]
                 +          a_ac[iA]   * pa[iP];

   /* }*/ /* end ForBoxI */

   return hypre_error_flag;
}

/* core part of
hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size (19) and one value of constant_coefficient (0). Within this functions there is a test on constant_coefficient_A as well. */ HYPRE_Int hypre_PFMG3BuildRAPSym_onebox_FSS19_CC0( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index stridec; hypre_Index fstart; hypre_IndexRef stridef; hypre_Index loop_size; HYPRE_Int constant_coefficient_A; hypre_Box *A_dbox; hypre_Box *P_dbox; hypre_Box *R_dbox; hypre_Box *RAP_dbox; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_ac, *a_aw, *a_as; HYPRE_Real *a_bc, *a_bw, *a_be, *a_bs, *a_bn; HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne; HYPRE_Real a_cs_offd, a_cs_offdm1, a_cs_offdp1; HYPRE_Real a_csw_offd, a_csw_offdm1, a_csw_offdp1; HYPRE_Real a_cse_offd, a_cse_offdm1, a_cse_offdp1; HYPRE_Real a_cn_offdm1, a_cne_offdm1, a_cnw_offdm1; HYPRE_Real a_cw_offd, a_cw_offdm1, a_cw_offdp1; HYPRE_Real a_ce_offdm1; HYPRE_Real a_ac_offd, a_ac_offdm1; HYPRE_Real a_aw_offd, a_aw_offdm1; HYPRE_Real a_as_offd, a_as_offdm1; HYPRE_Real a_bc_offd, a_bc_offdm1, a_bc_offdp1; HYPRE_Real a_be_offd, a_be_offdm1; HYPRE_Real a_bn_offd, a_bn_offdm1; HYPRE_Real a_bw_offd, a_bw_offdm1, a_bw_offdp1; HYPRE_Real a_bs_offd, a_bs_offdm1, a_bs_offdp1; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne; HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int zOffsetA; HYPRE_Int zOffsetA_diag; HYPRE_Int zOffsetA_offd; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; HYPRE_Int zOffsetP; stridef = cstride; 
hypre_SetIndex3(stridec, 1, 1, 1); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi); P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index); /*----------------------------------------------------------------- * Extract pointers for 7-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient in same plane * a_ce is 
pointer for east coefficient in same plane * a_cs is pointer for south coefficient in same plane * a_cn is pointer for north coefficient in same plane * a_ac is pointer for center coefficient in plane above * a_bc is pointer for center coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 19-point fine grid operator: * * a_aw is pointer for west coefficient in plane above * a_ae is pointer for east coefficient in plane above * a_as is pointer for south coefficient in plane above * a_an is pointer for north coefficient in plane above * a_bw is pointer for west coefficient in plane below * a_be is pointer for east coefficient in plane below * a_bs is pointer for south coefficient in plane below * a_bn is pointer for north coefficient in plane below * a_csw is pointer for southwest coefficient in same plane * a_cse is pointer for southeast coefficient in same plane * a_cnw is pointer for northwest coefficient in same 
plane * a_cne is pointer for northeast coefficient in same plane *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for 19-point coarse grid operator: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point coarse grid operator: * * A 27-point coarse grid operator is produced when the fine grid * stencil is 19 or 27 point. * * We build only the lower triangular part. * * rap_csw is pointer for southwest coefficient in same plane (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,-1); MapIndex(index_temp, cdir, index); rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,-1); MapIndex(index_temp, cdir, index); rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,-1); MapIndex(index_temp, cdir, index); rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,-1); MapIndex(index_temp, cdir, index); rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); zOffsetP = hypre_BoxOffsetDistance(P_dbox,index); if ( constant_coefficient_A == 0 ) { zOffsetA = hypre_BoxOffsetDistance(A_dbox,index); } else { zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index); zOffsetA_offd = 0; } hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = hypre_BoxOffsetDistance(P_dbox,index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = hypre_BoxOffsetDistance(P_dbox,index); /*-------------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for symmetric 19-point fine grid operator; produces a * symmetric 27-point coarse grid operator. 
We calculate only the * lower triangular stencil entries: (below-southwest, below-south, * below-southeast, below-west, below-center, below-east, * below-northwest, below-north, below-northeast, center-southwest, * center-south, center-southeast, center-west, and center-center). *--------------------------------------------------------------*/ hypre_BoxGetSize(cgrid_box, loop_size); if ( constant_coefficient_A==0 ) { hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - zOffsetA; iAp1 = iA + zOffsetA; iP1 = iP - zOffsetP - yOffsetP - xOffsetP; rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP; rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1] + rb[iR] * a_bs[iAm1] + a_bs[iA] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP + xOffsetP; rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1]; iP1 = iP - zOffsetP - xOffsetP; rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1] + rb[iR] * a_bw[iAm1] + a_bw[iA] * pa[iP1]; iP1 = iP - zOffsetP; rap_bc[iAc] = a_bc[iA] * pa[iP1] + rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_bc[iAm1]; iP1 = iP - zOffsetP + xOffsetP; rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1] + rb[iR] * a_be[iAm1] + a_be[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP - xOffsetP; rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP; rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1] + rb[iR] * a_bn[iAm1] + a_bn[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP + xOffsetP; rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1]; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = a_csw[iA] + rb[iR] * a_csw[iAm1] * pb[iP1] + ra[iR] * a_csw[iAp1] * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = a_cs[iA] + rb[iR] * a_cs[iAm1] * pb[iP1] + ra[iR] * a_cs[iAp1] * pa[iP1] + 
a_bs[iA] * pb[iP1] + a_as[iA] * pa[iP1] + rb[iR] * a_as[iAm1] + ra[iR] * a_bs[iAp1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = a_cse[iA] + rb[iR] * a_cse[iAm1] * pb[iP1] + ra[iR] * a_cse[iAp1] * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * pa[iP1] + a_bw[iA] * pb[iP1] + a_aw[iA] * pa[iP1] + rb[iR] * a_aw[iAm1] + ra[iR] * a_bw[iAp1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_ac[iAm1] + ra[iR] * a_bc[iAp1] + a_bc[iA] * pb[iP] + a_ac[iA] * pa[iP]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } else { iA_offd = 0; iA_offdm1 = iA_offd - zOffsetA_offd; iA_offdp1 = iA_offd + zOffsetA_offd; a_cs_offd = a_cs[iA_offd]; a_cs_offdm1 = a_cs[iA_offdm1]; a_cs_offdp1 = a_cs[iA_offdp1]; a_cw_offd = a_cw[iA_offd]; a_cw_offdm1 = a_cw[iA_offdm1]; a_cw_offdp1 = a_cw[iA_offdp1]; a_ce_offdm1 = a_ce[iA_offdm1]; a_csw_offd = a_csw[iA_offd]; a_csw_offdm1 = a_csw[iA_offdm1]; a_csw_offdp1 = a_csw[iA_offdp1]; a_cse_offd = a_cse[iA_offd]; a_cse_offdm1 = a_cse[iA_offdm1]; a_cse_offdp1 = a_cse[iA_offdp1]; a_cn_offdm1 = a_cn[iA_offdm1]; a_cne_offdm1 = a_cne[iA_offdm1]; a_cnw_offdm1 = a_cnw[iA_offdm1]; a_ac_offd = a_ac[iA_offd]; a_ac_offdm1 = a_ac[iA_offdm1]; a_aw_offd = a_aw[iA_offd]; a_aw_offdm1 = a_aw[iA_offdm1]; a_as_offd = a_as[iA_offd]; a_as_offdm1 = a_as[iA_offdm1]; a_bc_offd = a_bc[iA_offd]; a_bc_offdm1 = a_bc[iA_offdm1]; a_bc_offdp1 = a_bc[iA_offdp1]; a_be_offd = a_be[iA_offd]; a_be_offdm1 = a_be[iA_offdm1]; a_bn_offd = a_bn[iA_offd]; a_bn_offdm1 = a_bn[iA_offdm1]; a_bw_offd = a_bw[iA_offd]; a_bw_offdm1 = a_bw[iA_offdm1]; a_bw_offdp1 = a_bw[iA_offdp1]; a_bs_offd = a_bs[iA_offd]; a_bs_offdm1 = a_bs[iA_offdm1]; a_bs_offdp1 = a_bs[iA_offdp1]; hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for 
private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - zOffsetA_diag; iAp1 = iA + zOffsetA_diag; iP1 = iP - zOffsetP - yOffsetP - xOffsetP; rap_bsw[iAc] = rb[iR] * a_csw_offdm1 * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP; rap_bs[iAc] = rb[iR] * a_cs_offdm1 * pa[iP1] + rb[iR] * a_bs_offdm1 + a_bs_offd * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP + xOffsetP; rap_bse[iAc] = rb[iR] * a_cse_offdm1 * pa[iP1]; iP1 = iP - zOffsetP - xOffsetP; rap_bw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1] + rb[iR] * a_bw_offdm1 + a_bw_offd * pa[iP1]; iP1 = iP - zOffsetP; rap_bc[iAc] = a_bc_offd * pa[iP1] + rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_bc_offdm1; iP1 = iP - zOffsetP + xOffsetP; rap_be[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1] + rb[iR] * a_be_offdm1 + a_be_offd * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP - xOffsetP; rap_bnw[iAc] = rb[iR] * a_cnw_offdm1 * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP; rap_bn[iAc] = rb[iR] * a_cn_offdm1 * pa[iP1] + rb[iR] * a_bn_offdm1 + a_bn_offd * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP + xOffsetP; rap_bne[iAc] = rb[iR] * a_cne_offdm1 * pa[iP1]; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = a_csw_offd + rb[iR] * a_csw_offdm1 * pb[iP1] + ra[iR] * a_csw_offdp1 * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = a_cs_offd + rb[iR] * a_cs_offdm1 * pb[iP1] + ra[iR] * a_cs_offdp1 * pa[iP1] + a_bs_offd * pb[iP1] + a_as_offd * pa[iP1] + rb[iR] * a_as_offdm1 + ra[iR] * a_bs_offdp1; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = a_cse_offd + rb[iR] * a_cse_offdm1 * pb[iP1] + ra[iR] * a_cse_offdp1 * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw_offd + rb[iR] * a_cw_offdm1 * pb[iP1] + ra[iR] * a_cw_offdp1 * pa[iP1] + a_bw_offd * pb[iP1] + a_aw_offd * pa[iP1] + rb[iR] * a_aw_offdm1 + ra[iR] * a_bw_offdp1; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_ac_offdm1 + ra[iR] * a_bc_offdp1 + a_bc_offd * pb[iP] + a_ac_offd * pa[iP]; } hypre_BoxLoop4End(iP, iR, 
iA, iAc); } /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size (19) and one value of constant_coefficient (1). */ HYPRE_Int hypre_PFMG3BuildRAPSym_onebox_FSS19_CC1( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index fstart; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_ac, *a_aw, *a_as; HYPRE_Real *a_bc, *a_bw, *a_be, *a_bs, *a_bn; HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int zOffsetA; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; HYPRE_Int zOffsetP; cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers 
for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 7-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient in same plane * a_ce is pointer for east coefficient in same plane * a_cs is pointer for south coefficient in same plane * a_cn is pointer for north coefficient in same plane * a_ac is pointer for center coefficient in plane above * a_bc is pointer for center coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); 
/*----------------------------------------------------------------- * Extract additional pointers for 19-point fine grid operator: * * a_aw is pointer for west coefficient in plane above * a_ae is pointer for east coefficient in plane above * a_as is pointer for south coefficient in plane above * a_an is pointer for north coefficient in plane above * a_bw is pointer for west coefficient in plane below * a_be is pointer for east coefficient in plane below * a_bs is pointer for south coefficient in plane below * a_bn is pointer for north coefficient in plane below * a_csw is pointer for southwest coefficient in same plane * a_cse is pointer for southeast coefficient in same plane * a_cnw is pointer for northwest coefficient in same plane * a_cne is pointer for northeast coefficient in same plane *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, 
cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for 19-point coarse grid operator: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point 
coarse grid operator: * * A 27-point coarse grid operator is produced when the fine grid * stencil is 19 or 27 point. * * We build only the lower triangular part. * * rap_csw is pointer for southwest coefficient in same plane (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,-1); MapIndex(index_temp, cdir, index); rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,-1); MapIndex(index_temp, cdir, index); rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,-1); MapIndex(index_temp, cdir, index); rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,-1); MapIndex(index_temp, cdir, index); rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); zOffsetA = 0; zOffsetP = 0; hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*-------------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for symmetric 19-point fine grid operator; produces a * symmetric 27-point coarse grid operator. 
We calculate only the * lower triangular stencil entries: (below-southwest, below-south, * below-southeast, below-west, below-center, below-east, * below-northwest, below-north, below-northeast, center-southwest, * center-south, center-southeast, center-west, and center-center). *--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - zOffsetA; iAp1 = iA + zOffsetA; iP1 = iP - zOffsetP - yOffsetP - xOffsetP; rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP; rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1] + rb[iR] * a_bs[iAm1] + a_bs[iA] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP + xOffsetP; rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1]; iP1 = iP - zOffsetP - xOffsetP; rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1] + rb[iR] * a_bw[iAm1] + a_bw[iA] * pa[iP1]; iP1 = iP - zOffsetP; rap_bc[iAc] = a_bc[iA] * pa[iP1] + rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_bc[iAm1]; iP1 = iP - zOffsetP + xOffsetP; rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1] + rb[iR] * a_be[iAm1] + a_be[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP - xOffsetP; rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP; rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1] + rb[iR] * a_bn[iAm1] + a_bn[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP + xOffsetP; rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1]; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = a_csw[iA] + rb[iR] * a_csw[iAm1] * pb[iP1] + ra[iR] * a_csw[iAp1] * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = a_cs[iA] + rb[iR] * a_cs[iAm1] * pb[iP1] + ra[iR] * a_cs[iAp1] * pa[iP1] + a_bs[iA] * pb[iP1] + a_as[iA] * pa[iP1] + rb[iR] * a_as[iAm1] + ra[iR] * a_bs[iAp1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = a_cse[iA] + rb[iR] * a_cse[iAm1] * pb[iP1] + ra[iR] * a_cse[iAp1] * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * pa[iP1] + a_bw[iA] * pb[iP1] + a_aw[iA] * pa[iP1] + rb[iR] * a_aw[iAm1] + ra[iR] * 
a_bw[iAp1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_ac[iAm1] + ra[iR] * a_bc[iAp1] + a_bc[iA] * pb[iP] + a_ac[iA] * pa[iP]; /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size (27) and one value of constant_coefficient (0). Within this functions there is a test on constant_coefficient_A as well. */ HYPRE_Int hypre_PFMG3BuildRAPSym_onebox_FSS27_CC0( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index stridec; hypre_Index fstart; hypre_IndexRef stridef; hypre_Index loop_size; HYPRE_Int constant_coefficient_A; hypre_Box *A_dbox; hypre_Box *P_dbox; hypre_Box *R_dbox; hypre_Box *RAP_dbox; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_ac, *a_aw, *a_as; HYPRE_Real *a_bc, *a_bw, *a_be, *a_bs, *a_bn; HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne; HYPRE_Real *a_asw, *a_ase; HYPRE_Real *a_bsw, *a_bse, *a_bnw, *a_bne; HYPRE_Real a_cs_offd, a_cs_offdm1, a_cs_offdp1; HYPRE_Real a_csw_offd, a_csw_offdm1, a_csw_offdp1; HYPRE_Real a_cse_offd, a_cse_offdm1, a_cse_offdp1; HYPRE_Real a_cn_offdm1, a_cne_offdm1, a_cnw_offdm1; HYPRE_Real a_cw_offd, a_cw_offdm1, a_cw_offdp1; HYPRE_Real a_ce_offdm1; HYPRE_Real a_ac_offd, a_ac_offdm1; HYPRE_Real a_aw_offd, a_aw_offdm1; HYPRE_Real a_as_offd, a_as_offdm1; HYPRE_Real a_asw_offd, a_asw_offdm1; HYPRE_Real a_ase_offd, a_ase_offdm1; HYPRE_Real a_bc_offd, a_bc_offdm1, a_bc_offdp1; HYPRE_Real a_be_offd, a_be_offdm1; HYPRE_Real a_bn_offd, a_bn_offdm1; HYPRE_Real a_bw_offd, a_bw_offdm1, a_bw_offdp1; HYPRE_Real a_bs_offd, a_bs_offdm1, a_bs_offdp1; HYPRE_Real a_bsw_offd, a_bsw_offdm1, 
a_bsw_offdp1; HYPRE_Real a_bse_offd, a_bse_offdm1, a_bse_offdp1; HYPRE_Real a_bnw_offd, a_bnw_offdm1; HYPRE_Real a_bne_offd, a_bne_offdm1; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne; HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int zOffsetA; HYPRE_Int zOffsetA_diag; HYPRE_Int zOffsetA_offd; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; HYPRE_Int zOffsetP; stridef = cstride; hypre_SetIndex3(stridec, 1, 1, 1); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi); P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index); /*----------------------------------------------------------------- * Extract pointers for 7-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient in same plane * a_ce is pointer for east coefficient in same plane * a_cs is pointer for south coefficient in same plane * a_cn is pointer for north coefficient in same plane * a_ac is pointer for center coefficient in plane above * a_bc is pointer for center coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 19-point fine grid operator: * * a_aw is 
pointer for west coefficient in plane above * a_ae is pointer for east coefficient in plane above * a_as is pointer for south coefficient in plane above * a_an is pointer for north coefficient in plane above * a_bw is pointer for west coefficient in plane below * a_be is pointer for east coefficient in plane below * a_bs is pointer for south coefficient in plane below * a_bn is pointer for north coefficient in plane below * a_csw is pointer for southwest coefficient in same plane * a_cse is pointer for southeast coefficient in same plane * a_cnw is pointer for northwest coefficient in same plane * a_cne is pointer for northeast coefficient in same plane *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, 
index); a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point fine grid operator: * * a_asw is pointer for southwest coefficient in plane above * a_ase is pointer for southeast coefficient in plane above * a_anw is pointer for northwest coefficient in plane above * a_ane is pointer for northeast coefficient in plane above * a_bsw is pointer for southwest coefficient in plane below * a_bse is pointer for southeast coefficient in plane below * a_bnw is pointer for northwest coefficient in plane below * a_bne is pointer for northeast coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,1); MapIndex(index_temp, cdir, index); a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,1); MapIndex(index_temp, cdir, index); a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,-1); MapIndex(index_temp, cdir, index); a_bsw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,-1); MapIndex(index_temp, cdir, index); a_bse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,-1); MapIndex(index_temp, cdir, index); a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,-1); MapIndex(index_temp, cdir, index); a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for 19-point coarse grid operator: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point coarse grid operator: * * A 27-point coarse grid operator is produced when the fine grid * stencil is 19 or 27 point. * * We build only the lower triangular part. * * rap_csw is pointer for southwest coefficient in same plane (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,-1); MapIndex(index_temp, cdir, index); rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,-1); MapIndex(index_temp, cdir, index); rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,-1); MapIndex(index_temp, cdir, index); rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,-1); MapIndex(index_temp, cdir, index); rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); zOffsetP = hypre_BoxOffsetDistance(P_dbox,index); if ( constant_coefficient_A == 0 ) { zOffsetA = hypre_BoxOffsetDistance(A_dbox,index); } else { zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index); zOffsetA_offd = 0; } hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = hypre_BoxOffsetDistance(P_dbox,index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = hypre_BoxOffsetDistance(P_dbox,index); /*-------------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for symmetric 27-point fine grid operator; produces a * symmetric 27-point coarse grid operator. 
We calculate only the * lower triangular stencil entries: (below-southwest, below-south, * below-southeast, below-west, below-center, below-east, * below-northwest, below-north, below-northeast, center-southwest, * center-south, center-southeast, center-west, and center-center). *--------------------------------------------------------------*/ hypre_BoxGetSize(cgrid_box, loop_size); if ( constant_coefficient_A == 0 ) { hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - zOffsetA; iAp1 = iA + zOffsetA; iP1 = iP - zOffsetP - yOffsetP - xOffsetP; rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1] + rb[iR] * a_bsw[iAm1] + a_bsw[iA] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP; rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1] + rb[iR] * a_bs[iAm1] + a_bs[iA] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP + xOffsetP; rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1] + rb[iR] * a_bse[iAm1] + a_bse[iA] * pa[iP1]; iP1 = iP - zOffsetP - xOffsetP; rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1] + rb[iR] * a_bw[iAm1] + a_bw[iA] * pa[iP1]; iP1 = iP - zOffsetP; rap_bc[iAc] = a_bc[iA] * pa[iP1] + rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_bc[iAm1]; iP1 = iP - zOffsetP + xOffsetP; rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1] + rb[iR] * a_be[iAm1] + a_be[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP - xOffsetP; rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1] + rb[iR] * a_bnw[iAm1] + a_bnw[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP; rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1] + rb[iR] * a_bn[iAm1] + a_bn[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP + xOffsetP; rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1] + rb[iR] * a_bne[iAm1] + a_bne[iA] * pa[iP1]; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = a_csw[iA] 
+ rb[iR] * a_csw[iAm1] * pb[iP1] + ra[iR] * a_csw[iAp1] * pa[iP1] + a_bsw[iA] * pb[iP1] + a_asw[iA] * pa[iP1] + rb[iR] * a_asw[iAm1] + ra[iR] * a_bsw[iAp1]; iP1 = iP - yOffsetP; rap_cs[iAc] = a_cs[iA] + rb[iR] * a_cs[iAm1] * pb[iP1] + ra[iR] * a_cs[iAp1] * pa[iP1] + a_bs[iA] * pb[iP1] + a_as[iA] * pa[iP1] + rb[iR] * a_as[iAm1] + ra[iR] * a_bs[iAp1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = a_cse[iA] + rb[iR] * a_cse[iAm1] * pb[iP1] + ra[iR] * a_cse[iAp1] * pa[iP1] + a_bse[iA] * pb[iP1] + a_ase[iA] * pa[iP1] + rb[iR] * a_ase[iAm1] + ra[iR] * a_bse[iAp1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * pa[iP1] + a_bw[iA] * pb[iP1] + a_aw[iA] * pa[iP1] + rb[iR] * a_aw[iAm1] + ra[iR] * a_bw[iAp1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_ac[iAm1] + ra[iR] * a_bc[iAp1] + a_bc[iA] * pb[iP] + a_ac[iA] * pa[iP]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } else { iA_offd = 0; iA_offdm1 = iA_offd - zOffsetA_offd; iA_offdp1 = iA_offd + zOffsetA_offd; a_cs_offd = a_cs[iA_offd]; a_cs_offdm1 = a_cs[iA_offdm1]; a_cs_offdp1 = a_cs[iA_offdp1]; a_cse_offd = a_cse[iA_offd]; a_cse_offdm1 = a_cse[iA_offdm1]; a_cse_offdp1 = a_cse[iA_offdp1]; a_csw_offd = a_csw[iA_offd]; a_csw_offdm1 = a_csw[iA_offdm1]; a_csw_offdp1 = a_csw[iA_offdp1]; a_cw_offd = a_cw[iA_offd]; a_cw_offdm1 = a_cw[iA_offdm1]; a_cw_offdp1 = a_cw[iA_offdp1]; a_cn_offdm1 = a_cn[iA_offdm1]; a_cne_offdm1 = a_cne[iA_offdm1]; a_cnw_offdm1 = a_cnw[iA_offdm1]; a_ce_offdm1 = a_ce[iA_offdm1]; a_ac_offd = a_ac[iA_offd]; a_ac_offdm1 = a_ac[iA_offdm1]; a_as_offd = a_as[iA_offd]; a_as_offdm1 = a_as[iA_offdm1]; a_aw_offd = a_aw[iA_offd]; a_aw_offdm1 = a_aw[iA_offdm1]; a_asw_offd = a_asw[iA_offd]; a_asw_offdm1 = a_asw[iA_offdm1]; a_ase_offd = a_ase[iA_offd]; a_ase_offdm1 = a_ase[iA_offdm1]; a_bc_offd = a_bc[iA_offd]; a_bc_offdm1 = a_bc[iA_offdm1]; a_bc_offdp1 = a_bc[iA_offdp1]; a_bs_offd = a_bs[iA_offd]; a_bs_offdm1 = 
a_bs[iA_offdm1]; a_bs_offdp1 = a_bs[iA_offdp1]; a_bsw_offd = a_bsw[iA_offd]; a_bsw_offdm1 = a_bsw[iA_offdm1]; a_bsw_offdp1 = a_bsw[iA_offdp1]; a_bse_offd = a_bse[iA_offd]; a_bse_offdm1 = a_bse[iA_offdm1]; a_bse_offdp1 = a_bse[iA_offdp1]; a_be_offd = a_be[iA_offd]; a_be_offdm1 = a_be[iA_offdm1]; a_bw_offd = a_bw[iA_offd]; a_bw_offdm1 = a_bw[iA_offdm1]; a_bw_offdp1 = a_bw[iA_offdp1]; a_bn_offd = a_bn[iA_offd]; a_bn_offdm1 = a_bn[iA_offdm1]; a_bnw_offd = a_bnw[iA_offd]; a_bnw_offdm1 = a_bnw[iA_offdm1]; a_bne_offd = a_bne[iA_offd]; a_bne_offdm1 = a_bne[iA_offdm1]; hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - zOffsetA_diag; iAp1 = iA + zOffsetA_diag; iP1 = iP - zOffsetP - yOffsetP - xOffsetP; rap_bsw[iAc] = rb[iR] * a_csw_offdm1 * pa[iP1] + rb[iR] * a_bsw_offdm1 + a_bsw_offd * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP; rap_bs[iAc] = rb[iR] * a_cs_offdm1 * pa[iP1] + rb[iR] * a_bs_offdm1 + a_bs_offd * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP + xOffsetP; rap_bse[iAc] = rb[iR] * a_cse_offdm1 * pa[iP1] + rb[iR] * a_bse_offdm1 + a_bse_offd * pa[iP1]; iP1 = iP - zOffsetP - xOffsetP; rap_bw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1] + rb[iR] * a_bw_offdm1 + a_bw_offd * pa[iP1]; iP1 = iP - zOffsetP; rap_bc[iAc] = a_bc_offd * pa[iP1] + rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_bc_offdm1; iP1 = iP - zOffsetP + xOffsetP; rap_be[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1] + rb[iR] * a_be_offdm1 + a_be_offd * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP - xOffsetP; rap_bnw[iAc] = rb[iR] * a_cnw_offdm1 * pa[iP1] + rb[iR] * a_bnw_offdm1 + a_bnw_offd * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP; rap_bn[iAc] = rb[iR] * a_cn_offdm1 * pa[iP1] + rb[iR] * a_bn_offdm1 + a_bn_offd * pa[iP1]; iP1 = iP - 
zOffsetP + yOffsetP + xOffsetP; rap_bne[iAc] = rb[iR] * a_cne_offdm1 * pa[iP1] + rb[iR] * a_bne_offdm1 + a_bne_offd * pa[iP1]; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = a_csw_offd + rb[iR] * a_csw_offdm1 * pb[iP1] + ra[iR] * a_csw_offdp1 * pa[iP1] + a_bsw_offd * pb[iP1] + a_asw_offd * pa[iP1] + rb[iR] * a_asw_offdm1 + ra[iR] * a_bsw_offdp1; iP1 = iP - yOffsetP; rap_cs[iAc] = a_cs_offd + rb[iR] * a_cs_offdm1 * pb[iP1] + ra[iR] * a_cs_offdp1 * pa[iP1] + a_bs_offd * pb[iP1] + a_as_offd * pa[iP1] + rb[iR] * a_as_offdm1 + ra[iR] * a_bs_offdp1; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = a_cse_offd + rb[iR] * a_cse_offdm1 * pb[iP1] + ra[iR] * a_cse_offdp1 * pa[iP1] + a_bse_offd * pb[iP1] + a_ase_offd * pa[iP1] + rb[iR] * a_ase_offdm1 + ra[iR] * a_bse_offdp1; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw_offd + rb[iR] * a_cw_offdm1 * pb[iP1] + ra[iR] * a_cw_offdp1 * pa[iP1] + a_bw_offd * pb[iP1] + a_aw_offd * pa[iP1] + rb[iR] * a_aw_offdm1 + ra[iR] * a_bw_offdp1; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_ac_offdm1 + ra[iR] * a_bc_offdp1 + a_bc_offd * pb[iP] + a_ac_offd * pa[iP]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* core part of hypre_PFMG3BuildRAPSym, for one box, one value of fine_stencil_size (27) and one value of constant_coefficient (1). 
*/ HYPRE_Int hypre_PFMG3BuildRAPSym_onebox_FSS27_CC1( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index fstart; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_ac, *a_aw, *a_as; HYPRE_Real *a_bc, *a_bw, *a_be, *a_bs, *a_bn; HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne; HYPRE_Real *a_asw, *a_ase; HYPRE_Real *a_bsw, *a_bse, *a_bnw, *a_bne; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_bc, *rap_bw, *rap_be, *rap_bs, *rap_bn; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Real *rap_bsw, *rap_bse, *rap_bnw, *rap_bne; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int zOffsetA; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; HYPRE_Int zOffsetP; cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below 
c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 7-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient in same plane * a_ce is pointer for east coefficient in same plane * a_cs is pointer for south coefficient in same plane * a_cn is pointer for north coefficient in same plane * a_ac is pointer for center coefficient in plane above * a_bc is pointer for center coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); a_bc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 19-point fine grid operator: * * a_aw is pointer for west coefficient in 
plane above * a_ae is pointer for east coefficient in plane above * a_as is pointer for south coefficient in plane above * a_an is pointer for north coefficient in plane above * a_bw is pointer for west coefficient in plane below * a_be is pointer for east coefficient in plane below * a_bs is pointer for south coefficient in plane below * a_bn is pointer for north coefficient in plane below * a_csw is pointer for southwest coefficient in same plane * a_cse is pointer for southeast coefficient in same plane * a_cnw is pointer for northwest coefficient in same plane * a_cne is pointer for northeast coefficient in same plane *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); a_bw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); a_bs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); a_cne = 
hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point fine grid operator: * * a_asw is pointer for southwest coefficient in plane above * a_ase is pointer for southeast coefficient in plane above * a_anw is pointer for northwest coefficient in plane above * a_ane is pointer for northeast coefficient in plane above * a_bsw is pointer for southwest coefficient in plane below * a_bse is pointer for southeast coefficient in plane below * a_bnw is pointer for northwest coefficient in plane below * a_bne is pointer for northeast coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,1); MapIndex(index_temp, cdir, index); a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,1); MapIndex(index_temp, cdir, index); a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,-1); MapIndex(index_temp, cdir, index); a_bsw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,-1); MapIndex(index_temp, cdir, index); a_bse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,-1); MapIndex(index_temp, cdir, index); a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,-1); MapIndex(index_temp, cdir, index); a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for 19-point coarse grid operator: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); rap_bc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,-1); MapIndex(index_temp, cdir, index); rap_bw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); rap_be = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,-1); MapIndex(index_temp, cdir, index); rap_bs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); rap_bn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point coarse grid operator: * * A 27-point coarse grid operator is produced when the fine grid * stencil is 19 or 27 point. * * We build only the lower triangular part. * * rap_csw is pointer for southwest coefficient in same plane (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,-1); MapIndex(index_temp, cdir, index); rap_bsw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,-1); MapIndex(index_temp, cdir, index); rap_bse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,-1); MapIndex(index_temp, cdir, index); rap_bnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,-1); MapIndex(index_temp, cdir, index); rap_bne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); zOffsetA = 0; zOffsetP = 0; hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*-------------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for symmetric 27-point fine grid operator; produces a * symmetric 27-point coarse grid operator. We calculate only the * lower triangular stencil entries: (below-southwest, below-south, * below-southeast, below-west, below-center, below-east, * below-northwest, below-north, below-northeast, center-southwest, * center-south, center-southeast, center-west, and center-center). 
*--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - zOffsetA; iAp1 = iA + zOffsetA; iP1 = iP - zOffsetP - yOffsetP - xOffsetP; rap_bsw[iAc] = rb[iR] * a_csw[iAm1] * pa[iP1] + rb[iR] * a_bsw[iAm1] + a_bsw[iA] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP; rap_bs[iAc] = rb[iR] * a_cs[iAm1] * pa[iP1] + rb[iR] * a_bs[iAm1] + a_bs[iA] * pa[iP1]; iP1 = iP - zOffsetP - yOffsetP + xOffsetP; rap_bse[iAc] = rb[iR] * a_cse[iAm1] * pa[iP1] + rb[iR] * a_bse[iAm1] + a_bse[iA] * pa[iP1]; iP1 = iP - zOffsetP - xOffsetP; rap_bw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1] + rb[iR] * a_bw[iAm1] + a_bw[iA] * pa[iP1]; iP1 = iP - zOffsetP; rap_bc[iAc] = a_bc[iA] * pa[iP1] + rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_bc[iAm1]; iP1 = iP - zOffsetP + xOffsetP; rap_be[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1] + rb[iR] * a_be[iAm1] + a_be[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP - xOffsetP; rap_bnw[iAc] = rb[iR] * a_cnw[iAm1] * pa[iP1] + rb[iR] * a_bnw[iAm1] + a_bnw[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP; rap_bn[iAc] = rb[iR] * a_cn[iAm1] * pa[iP1] + rb[iR] * a_bn[iAm1] + a_bn[iA] * pa[iP1]; iP1 = iP - zOffsetP + yOffsetP + xOffsetP; rap_bne[iAc] = rb[iR] * a_cne[iAm1] * pa[iP1] + rb[iR] * a_bne[iAm1] + a_bne[iA] * pa[iP1]; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = a_csw[iA] + rb[iR] * a_csw[iAm1] * pb[iP1] + ra[iR] * a_csw[iAp1] * pa[iP1] + a_bsw[iA] * pb[iP1] + a_asw[iA] * pa[iP1] + rb[iR] * a_asw[iAm1] + ra[iR] * a_bsw[iAp1]; iP1 = iP - yOffsetP; rap_cs[iAc] = a_cs[iA] + rb[iR] * a_cs[iAm1] * pb[iP1] + ra[iR] * a_cs[iAp1] * pa[iP1] + a_bs[iA] * pb[iP1] + a_as[iA] * pa[iP1] + rb[iR] * a_as[iAm1] + ra[iR] * a_bs[iAp1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = a_cse[iA] + rb[iR] * a_cse[iAm1] * pb[iP1] + ra[iR] * a_cse[iAp1] * pa[iP1] + a_bse[iA] * pb[iP1] + a_ase[iA] * pa[iP1] + rb[iR] * a_ase[iAm1] + ra[iR] * a_bse[iAp1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * 
pa[iP1] + a_bw[iA] * pb[iP1] + a_aw[iA] * pa[iP1] + rb[iR] * a_aw[iAm1] + ra[iR] * a_bw[iAp1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_ac[iAm1] + ra[iR] * a_bc[iAp1] + a_bc[iA] * pb[iP] + a_ac[iA] * pa[iP]; /* }*/ /* end ForBoxI */ return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_PFMG3BuildRAPNoSym( hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_StructStencil *fine_stencil; HYPRE_Int fine_stencil_size; hypre_StructGrid *fgrid; HYPRE_Int *fgrid_ids; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; HYPRE_Int *cgrid_ids; HYPRE_Int fi, ci; HYPRE_Int constant_coefficient; HYPRE_Int constant_coefficient_A; fine_stencil = hypre_StructMatrixStencil(A); fine_stencil_size = hypre_StructStencilSize(fine_stencil); fgrid = hypre_StructMatrixGrid(A); fgrid_ids = hypre_StructGridIDs(fgrid); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_ids = hypre_StructGridIDs(cgrid); constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); hypre_assert( constant_coefficient==0 || constant_coefficient==1 ); hypre_assert( hypre_StructMatrixConstantCoefficient(R) == constant_coefficient ); hypre_assert( hypre_StructMatrixConstantCoefficient(P) == constant_coefficient ); if (constant_coefficient==1 ) { hypre_assert( constant_coefficient_A==1 ); } else { hypre_assert( constant_coefficient_A==0 || constant_coefficient_A==2 ); } fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } switch (fine_stencil_size) { /*-------------------------------------------------------------- * Loop for 7-point fine grid operator; produces upper 
triangular * part of 19-point coarse grid operator. stencil entries: * (above-north, above-east, above-center, above-west, * above-south, center-north, and center-east). *--------------------------------------------------------------*/ case 7: if ( constant_coefficient == 1 ) { hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; /*-------------------------------------------------------------- * Loop for 19-point fine grid operator; produces upper triangular * part of 27-point coarse grid operator. stencil entries: * (above-northeast, above-north, above-northwest, above-east, * above-center, above-west, above-southeast, above-south, * above-southwest, center-northeast, center-north, * center-northwest, and center-east). *--------------------------------------------------------------*/ case 19: if ( constant_coefficient == 1 ) { hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; /*-------------------------------------------------------------- * Loop for 27-point fine grid operator; produces upper triangular * part of 27-point coarse grid operator. stencil entries: * (above-northeast, above-north, above-northwest, above-east, * above-center, above-west, above-southeast, above-south, * above-southwest, center-northeast, center-north, * center-northwest, and center-east). 
*--------------------------------------------------------------*/ default: if ( constant_coefficient == 1 ) { hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; } /* end switch statement */ } /* end ForBoxI */ return hypre_error_flag; } /* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of fine_stencil_size (07) and one value of constant_coefficient (0). */ HYPRE_Int hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC0( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index stridec; hypre_Index fstart; hypre_IndexRef stridef; hypre_Index loop_size; HYPRE_Int constant_coefficient_A; hypre_Box *A_dbox; hypre_Box *P_dbox; hypre_Box *R_dbox; hypre_Box *RAP_dbox; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_ac; HYPRE_Real a_cn_offd, a_cn_offdm1, a_cn_offdp1; HYPRE_Real a_ce_offd, a_ce_offdm1, a_ce_offdp1; HYPRE_Real a_cs_offdp1, a_cw_offdp1; HYPRE_Real a_ac_offd, a_ac_offdp1; HYPRE_Real *rap_ce, *rap_cn; HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an; HYPRE_Real *rap_cnw, *rap_cne; HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int zOffsetA; HYPRE_Int zOffsetA_diag; HYPRE_Int zOffsetA_offd; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; HYPRE_Int zOffsetP; stridef = cstride; hypre_SetIndex3(stridec, 1, 1, 1); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); /* fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != 
cgrid_ids[ci]) { fi++; } */ cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi); P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index); /*----------------------------------------------------------------- * Extract pointers for 7-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient in same plane * a_ce is pointer for east coefficient in same plane * a_cs is pointer for south coefficient in same plane * a_cn is pointer for north coefficient in same plane * a_ac 
is pointer for center coefficient in plane above * a_bc is pointer for center coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for 19-point coarse grid operator: * * We build only the upper triangular part (excluding diagonal). * * rap_ce is pointer for east coefficient in same plane (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,0,1); MapIndex(index_temp, cdir, index); rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,1); MapIndex(index_temp, cdir, index); rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); zOffsetP = hypre_BoxOffsetDistance(P_dbox,index); if ( constant_coefficient_A == 0 ) { zOffsetA = hypre_BoxOffsetDistance(A_dbox,index); } else { zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index); zOffsetA_offd = 0; } hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = hypre_BoxOffsetDistance(P_dbox,index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = hypre_BoxOffsetDistance(P_dbox,index); /*----------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for 7-point fine grid operator; produces upper triangular * part of 19-point coarse grid operator. stencil entries: * (above-north, above-east, above-center, above-west, * above-south, center-north, and center-east). 
*--------------------------------------------------------------*/ hypre_BoxGetSize(cgrid_box, loop_size); if ( constant_coefficient_A == 0 ) { hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - zOffsetA; iAp1 = iA + zOffsetA; iP1 = iP + zOffsetP + yOffsetP; rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]; iP1 = iP + zOffsetP + xOffsetP; rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]; iP1 = iP + zOffsetP; rap_ac[iAc] = a_ac[iA] * pb[iP1] + ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_ac[iAp1]; iP1 = iP + zOffsetP - xOffsetP; rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]; iP1 = iP + zOffsetP - yOffsetP; rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = a_cn[iA] + rb[iR] * a_cn[iAm1] * pb[iP1] + ra[iR] * a_cn[iAp1] * pa[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce[iA] + rb[iR] * a_ce[iAm1] * pb[iP1] + ra[iR] * a_ce[iAp1] * pa[iP1]; rap_cnw[iAc] = 0.0; rap_cne[iAc] = 0.0; } hypre_BoxLoop4End(iP, iR, iA, iAc); } else { iA_offd = 0; iA_offdm1 = iA_offd - zOffsetA_offd; iA_offdp1 = iA_offd + zOffsetA_offd; a_cn_offd = a_cn[iA_offd]; a_cn_offdm1 = a_cn[iA_offdm1]; a_cn_offdp1 = a_cn[iA_offdp1]; a_ce_offd = a_ce[iA_offd]; a_ce_offdm1 = a_ce[iA_offdm1]; a_ce_offdp1 = a_ce[iA_offdp1]; a_cs_offdp1 = a_cs[iA_offdp1]; a_cw_offdp1 = a_cw[iA_offdp1]; a_ac_offd = a_ac[iA_offd]; a_ac_offdp1 = a_ac[iA_offdp1]; hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - 
zOffsetA_diag;
            iAp1 = iA + zOffsetA_diag;

            /* "above"-plane entries: only terms reaching the plane above
               survive for a 7-point fine-grid operator */
            iP1 = iP + zOffsetP + yOffsetP;
            rap_an[iAc] = ra[iR] * a_cn_offdp1 * pb[iP1];
            iP1 = iP + zOffsetP + xOffsetP;
            rap_ae[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1];
            iP1 = iP + zOffsetP;
            rap_ac[iAc] = a_ac_offd * pb[iP1]
               + ra[iR] * a_cc[iAp1] * pb[iP1]
               + ra[iR] * a_ac_offdp1;
            iP1 = iP + zOffsetP - xOffsetP;
            rap_aw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1];
            iP1 = iP + zOffsetP - yOffsetP;
            rap_as[iAc] = ra[iR] * a_cs_offdp1 * pb[iP1];

            /* same-plane (center) entries */
            iP1 = iP + yOffsetP;
            rap_cn[iAc] = a_cn_offd
               + rb[iR] * a_cn_offdm1 * pb[iP1]
               + ra[iR] * a_cn_offdp1 * pa[iP1];
            iP1 = iP + xOffsetP;
            rap_ce[iAc] = a_ce_offd
               + rb[iR] * a_ce_offdm1 * pb[iP1]
               + ra[iR] * a_ce_offdp1 * pa[iP1];

            /* a 7-point fine-grid operator contributes nothing to the
               same-plane corner entries of the coarse operator */
            rap_cnw[iAc] = 0.0;
            rap_cne[iAc] = 0.0;
         }
         hypre_BoxLoop4End(iP, iR, iA, iAc);
   }

/* }*/ /* end ForBoxI */

   return hypre_error_flag;
}

/* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of
   fine_stencil_size (07) and one value of constant_coefficient (1). */

/* Builds the upper-triangular (non-symmetric) stencil entries of the coarse
 * operator RAP = R*A*P for a single coarse box, specialized to a 7-point
 * fine-grid stencil in the fully constant-coefficient case: every operator
 * here is read and written only at data index 0, so all offsets collapse to
 * zero and the usual BoxLoop reduces to a single stencil evaluation.
 *
 * ci      - index of the coarse-grid box (indexes cgrid_boxes and RAP data)
 * fi      - matching fine-grid box index (indexes A, P, R data)
 * A       - fine-grid operator
 * P, R    - interpolation and restriction operators
 * cdir    - coarsening direction, used by MapIndex to permute stencil indices
 * cindex, cstride - coarsening index/stride for hypre_StructMapCoarseToFine
 * RAP     - coarse-grid operator being filled in
 *
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS07_CC1(
   HYPRE_Int ci,
   HYPRE_Int fi,
   hypre_StructMatrix *A,
   hypre_StructMatrix *P,
   hypre_StructMatrix *R,
   HYPRE_Int cdir,
   hypre_Index cindex,
   hypre_Index cstride,
   hypre_StructMatrix *RAP )
{
   hypre_Index index;
   hypre_Index index_temp;

   hypre_StructGrid *cgrid;
   hypre_BoxArray *cgrid_boxes;
   hypre_Box *cgrid_box;
   hypre_IndexRef cstart;
   hypre_Index fstart;

   HYPRE_Real *pa, *pb;       /* interpolation weights (above/below) */
   HYPRE_Real *ra, *rb;       /* restriction weights (above/below) */

   HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;   /* 7-pt fine stencil */
   HYPRE_Real *a_ac;

   HYPRE_Real *rap_ce, *rap_cn;                    /* coarse stencil (upper) */
   HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
   HYPRE_Real *rap_cnw, *rap_cne;

   HYPRE_Int iA, iAm1, iAp1;
   HYPRE_Int iAc;
   HYPRE_Int iP, iP1;
   HYPRE_Int iR;

   HYPRE_Int zOffsetA;
   HYPRE_Int xOffsetP;
   HYPRE_Int yOffsetP;
   HYPRE_Int zOffsetP;

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
   cstart = hypre_BoxIMin(cgrid_box);
   hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

   /*-----------------------------------------------------------------
    * Extract pointers for interpolation operator:
    * pa is pointer for weight for f-point above c-point
    * pb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for restriction operator:
    * ra is pointer for weight for f-point above c-point
    * rb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 7-point fine grid operator:
    *
    * a_cc is pointer for center coefficient
    * a_cw is pointer for west coefficient in same plane
    * a_ce is pointer for east coefficient in same plane
    * a_cs is pointer for south coefficient in same plane
    * a_cn is pointer for north coefficient in same plane
    * a_ac is pointer for center coefficient in plane above
    * a_bc is pointer for center coefficient in plane below
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 19-point coarse grid operator:
    *
    * We build only the upper triangular part (excluding diagonal).
    *
    * rap_ce is pointer for east coefficient in same plane (etc.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,0,1);
   MapIndex(index_temp, cdir, index);
   rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,0,1);
   MapIndex(index_temp, cdir, index);
   rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,-1,1);
   MapIndex(index_temp, cdir, index);
   rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,1);
   MapIndex(index_temp, cdir, index);
   rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Define offsets for fine grid stencil and interpolation
    *
    * In the constant-coefficient case every stencil entry is stored at
    * data index 0, so all offsets are zero.  The SetIndex/MapIndex calls
    * are retained, apparently to mirror the variable-coefficient variant.
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   zOffsetA = 0;
   zOffsetP = 0;

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   yOffsetP = 0;

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   xOffsetP = 0;

   /*--------------------------------------------------------------
    * Single evaluation (no BoxLoop needed for constant coefficients)
    * of the 7-point fine grid operator; produces the upper triangular
    * part of the 19-point coarse grid operator.  stencil entries:
    * (above-north, above-east, above-center, above-west,
    * above-south, center-north, and center-east).
    *--------------------------------------------------------------*/

   iP = 0;
   iR = 0;
   iA = 0;
   iAc = 0;

   iAm1 = iA - zOffsetA;
   iAp1 = iA + zOffsetA;

   iP1 = iP + zOffsetP + yOffsetP;
   rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1];

   iP1 = iP + zOffsetP + xOffsetP;
   rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1];

   iP1 = iP + zOffsetP;
   rap_ac[iAc] = a_ac[iA] * pb[iP1]
      + ra[iR] * a_cc[iAp1] * pb[iP1]
      + ra[iR] * a_ac[iAp1];

   iP1 = iP + zOffsetP - xOffsetP;
   rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1];

   iP1 = iP + zOffsetP - yOffsetP;
   rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1];

   iP1 = iP + yOffsetP;
   rap_cn[iAc] = a_cn[iA]
      + rb[iR] * a_cn[iAm1] * pb[iP1]
      + ra[iR] * a_cn[iAp1] * pa[iP1];

   iP1 = iP + xOffsetP;
   rap_ce[iAc] = a_ce[iA]
      + rb[iR] * a_ce[iAm1] * pb[iP1]
      + ra[iR] * a_ce[iAp1] * pa[iP1];

   /* no corner couplings from a 7-point fine-grid operator */
   rap_cnw[iAc] = 0.0;
   rap_cne[iAc] = 0.0;

/* }*/ /* end ForBoxI */

   return hypre_error_flag;
}

/* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of
   fine_stencil_size (19) and one value of constant_coefficient (0).
*/

/* Builds the upper-triangular (non-symmetric) stencil entries of the coarse
 * operator RAP = R*A*P for a single coarse box, specialized to a 19-point
 * fine-grid stencil with variable-coefficient P and R.  A itself may still
 * be constant-coefficient: if constant_coefficient_A != 0, A's stencil
 * entries are loaded once into scalars before the BoxLoop.
 *
 * ci      - index of the coarse-grid box (indexes cgrid_boxes and RAP data)
 * fi      - matching fine-grid box index (indexes A, P, R data)
 * A       - fine-grid operator
 * P, R    - interpolation and restriction operators
 * cdir    - coarsening direction, used by MapIndex to permute stencil indices
 * cindex, cstride - coarsening index/stride for hypre_StructMapCoarseToFine
 * RAP     - coarse-grid operator being filled in
 *
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC0(
   HYPRE_Int ci,
   HYPRE_Int fi,
   hypre_StructMatrix *A,
   hypre_StructMatrix *P,
   hypre_StructMatrix *R,
   HYPRE_Int cdir,
   hypre_Index cindex,
   hypre_Index cstride,
   hypre_StructMatrix *RAP )
{
   hypre_Index index;
   hypre_Index index_temp;

   hypre_StructGrid *cgrid;
   hypre_BoxArray *cgrid_boxes;
   hypre_Box *cgrid_box;
   hypre_IndexRef cstart;
   hypre_Index stridec;
   hypre_Index fstart;
   hypre_IndexRef stridef;
   hypre_Index loop_size;

   HYPRE_Int constant_coefficient_A;

   hypre_Box *A_dbox;
   hypre_Box *P_dbox;
   hypre_Box *R_dbox;
   hypre_Box *RAP_dbox;

   HYPRE_Real *pa, *pb;       /* interpolation weights (above/below) */
   HYPRE_Real *ra, *rb;       /* restriction weights (above/below) */

   HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;   /* 7-pt core of A */
   HYPRE_Real *a_ac, *a_aw, *a_ae, *a_as, *a_an;   /* 19-pt extras of A */
   HYPRE_Real *a_be, *a_bn;
   HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne;

   /* scalar copies of A's entries, used when A is constant-coefficient */
   HYPRE_Real a_cn_offd, a_cn_offdm1, a_cn_offdp1;
   HYPRE_Real a_ce_offd, a_ce_offdm1, a_ce_offdp1;
   HYPRE_Real a_cs_offdp1, a_cw_offdp1, a_cse_offdp1, a_csw_offdp1;
   HYPRE_Real a_cne_offd, a_cne_offdm1, a_cne_offdp1;
   HYPRE_Real a_cnw_offd, a_cnw_offdm1, a_cnw_offdp1;
   HYPRE_Real a_ac_offd, a_ac_offdp1;
   HYPRE_Real a_an_offd, a_an_offdm1, a_an_offdp1;
   HYPRE_Real a_as_offd, a_as_offdp1;
   HYPRE_Real a_aw_offd, a_aw_offdp1;
   HYPRE_Real a_ae_offd, a_ae_offdm1, a_ae_offdp1;
   HYPRE_Real a_be_offd, a_be_offdp1;
   HYPRE_Real a_bn_offd, a_bn_offdp1;

   HYPRE_Real *rap_ce, *rap_cn;                    /* coarse stencil (upper) */
   HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
   HYPRE_Real *rap_cnw, *rap_cne;
   HYPRE_Real *rap_asw, *rap_ase, *rap_anw, *rap_ane;

   HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
   HYPRE_Int iAc;
   HYPRE_Int iP, iP1;
   HYPRE_Int iR;

   HYPRE_Int zOffsetA;
   HYPRE_Int zOffsetA_diag;
   HYPRE_Int zOffsetA_offd;
   HYPRE_Int xOffsetP;
   HYPRE_Int yOffsetP;
   HYPRE_Int zOffsetP;

   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);

/* fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } */

   cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

   cstart = hypre_BoxIMin(cgrid_box);
   hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

   A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
   P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
   R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
   RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

   /*-----------------------------------------------------------------
    * Extract pointers for interpolation operator:
    * pa is pointer for weight for f-point above c-point
    * pb is pointer for weight for f-point below c-point
    * (pb is pre-shifted by one z-offset so loop indexing lines up)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for restriction operator:
    * ra is pointer for weight for f-point above c-point
    * rb is pointer for weight for f-point below c-point
    * (rb is pre-shifted by one z-offset so loop indexing lines up)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 7-point fine grid operator:
    *
    * a_cc is pointer for center coefficient
    * a_cw is pointer for west coefficient in same plane
    * a_ce is pointer for east coefficient in same plane
    * a_cs is pointer for south coefficient in same plane
    * a_cn is pointer for north coefficient in same plane
    * a_ac is pointer for center coefficient in plane above
    * a_bc is pointer for center coefficient in plane below
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract additional pointers for 19-point fine grid operator:
    *
    * a_aw is pointer for west coefficient in plane above
    * a_ae is pointer for east coefficient in plane above
    * a_as is pointer for south coefficient in plane above
    * a_an is pointer for north coefficient in plane above
    * a_bw is pointer for west coefficient in plane below
    * a_be is pointer for east coefficient in plane below
    * a_bs is pointer for south coefficient in plane below
    * a_bn is pointer for north coefficient in plane below
    * a_csw is pointer for southwest coefficient in same plane
    * a_cse is pointer for southeast coefficient in same plane
    * a_cnw is pointer for northwest coefficient in same plane
    * a_cne is pointer for northeast coefficient in same plane
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,-1,0,1);
   MapIndex(index_temp, cdir, index);
   a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,1);
   MapIndex(index_temp, cdir, index);
   a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,-1,1);
   MapIndex(index_temp, cdir, index);
   a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,1);
   MapIndex(index_temp, cdir, index);
   a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,-1);
   MapIndex(index_temp, cdir, index);
   a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,-1);
   MapIndex(index_temp, cdir, index);
   a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,-1,0);
   MapIndex(index_temp, cdir, index);
   a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,1,0);
   MapIndex(index_temp, cdir, index);
   a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,1,0);
   MapIndex(index_temp, cdir, index);
   a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 19-point coarse grid operator:
    *
    * We build only the upper triangular part (excluding diagonal).
    *
    * rap_ce is pointer for east coefficient in same plane (etc.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,0,1);
   MapIndex(index_temp, cdir, index);
   rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,0,1);
   MapIndex(index_temp, cdir, index);
   rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,-1,1);
   MapIndex(index_temp, cdir, index);
   rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,1);
   MapIndex(index_temp, cdir, index);
   rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Extract additional pointers for 27-point coarse grid operator:
    *
    * A 27-point coarse grid operator is produced when the fine grid
    * stencil is 19 or 27 point.
    *
    * We build only the upper triangular part.
    *
    * rap_cnw is pointer for northwest coefficient in same plane (etc.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,-1,-1,1);
   MapIndex(index_temp, cdir, index);
   rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,-1,1);
   MapIndex(index_temp, cdir, index);
   rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,1,1);
   MapIndex(index_temp, cdir, index);
   rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,1,1);
   MapIndex(index_temp, cdir, index);
   rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Define offsets for fine grid stencil and interpolation
    *
    * In the BoxLoop below I assume iA and iP refer to data associated
    * with the point which we are building the stencil for. The below
    * Offsets are used in referring to data associated with other points.
    * When A is constant-coefficient its "off-diagonal" entries live at a
    * single data index, so zOffsetA_offd is zero.
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
   if ( constant_coefficient_A == 0 )
   {
      zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
   }
   else
   {
      zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
      zOffsetA_offd = 0;
   }

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

   /*--------------------------------------------------------------
    * Loop for 19-point fine grid operator; produces upper triangular
    * part of 27-point coarse grid operator. stencil entries:
    * (above-northeast, above-north, above-northwest, above-east,
    * above-center, above-west, above-southeast, above-south,
    * above-southwest, center-northeast, center-north,
    * center-northwest, and center-east).
    * (In the original multi-stencil routine this sat inside a switch
    * on stencil size; this one-box variant is pre-specialized to 19.)
    *--------------------------------------------------------------*/

   hypre_BoxGetSize(cgrid_box, loop_size);

   if ( constant_coefficient_A == 0 )
   {
      /* variable-coefficient A: read A at each fine point */
      hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         iAm1 = iA - zOffsetA;
         iAp1 = iA + zOffsetA;

         iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
         rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP;
         rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
            + ra[iR] * a_an[iAp1]
            + a_an[iA] * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
         rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1];

         iP1 = iP + zOffsetP + xOffsetP;
         rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
            + ra[iR] * a_ae[iAp1]
            + a_ae[iA] * pb[iP1];

         iP1 = iP + zOffsetP;
         rap_ac[iAc] = a_ac[iA] * pb[iP1]
            + ra[iR] * a_cc[iAp1] * pb[iP1]
            + ra[iR] * a_ac[iAp1];

         iP1 = iP + zOffsetP - xOffsetP;
         rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
            + ra[iR] * a_aw[iAp1]
            + a_aw[iA] * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
         rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP;
         rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
            + ra[iR] * a_as[iAp1]
            + a_as[iA] * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
         rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1];

         iP1 = iP + yOffsetP + xOffsetP;
         rap_cne[iAc] = a_cne[iA]
            + rb[iR] * a_cne[iAm1] * pb[iP1]
            + ra[iR] * a_cne[iAp1] * pa[iP1];

         iP1 = iP + yOffsetP;
         rap_cn[iAc] = a_cn[iA]
            + rb[iR] * a_cn[iAm1] * pb[iP1]
            + ra[iR] * a_cn[iAp1] * pa[iP1]
            + a_bn[iA] * pb[iP1]
            + a_an[iA] * pa[iP1]
            + rb[iR] * a_an[iAm1]
            + ra[iR] * a_bn[iAp1];

         iP1 = iP + yOffsetP - xOffsetP;
         rap_cnw[iAc] = a_cnw[iA]
            + rb[iR] * a_cnw[iAm1] * pb[iP1]
            + ra[iR] * a_cnw[iAp1] * pa[iP1];

         iP1 = iP + xOffsetP;
         rap_ce[iAc] = a_ce[iA]
            + rb[iR] * a_ce[iAm1] * pb[iP1]
            + ra[iR] * a_ce[iAp1] * pa[iP1]
            + a_be[iA] * pb[iP1]
            + a_ae[iA] * pa[iP1]
            + rb[iR] * a_ae[iAm1]
            + ra[iR] * a_be[iAp1];
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }
   else
   {
      /* constant-coefficient A: hoist A's stencil entries out of the loop */
      iA_offd = 0;
      iA_offdm1 = iA_offd - zOffsetA_offd;
      iA_offdp1 = iA_offd + zOffsetA_offd;
      a_cn_offd = a_cn[iA_offd];
      a_cn_offdm1 = a_cn[iA_offdm1];
      a_cn_offdp1 = a_cn[iA_offdp1];
      a_cne_offd = a_cne[iA_offd];
      a_cne_offdm1 = a_cne[iA_offdm1];
      a_cne_offdp1 = a_cne[iA_offdp1];
      a_cnw_offd = a_cnw[iA_offd];
      a_cnw_offdm1 = a_cnw[iA_offdm1];
      a_cnw_offdp1 = a_cnw[iA_offdp1];
      a_ce_offd = a_ce[iA_offd];
      a_ce_offdm1 = a_ce[iA_offdm1];
      a_ce_offdp1 = a_ce[iA_offdp1];
      a_cw_offdp1 = a_cw[iA_offdp1];
      a_cs_offdp1 = a_cs[iA_offdp1];
      a_cse_offdp1 = a_cse[iA_offdp1];
      a_csw_offdp1 = a_csw[iA_offdp1];
      a_ac_offd = a_ac[iA_offd];
      a_ac_offdp1 = a_ac[iA_offdp1];
      a_an_offd = a_an[iA_offd];
      a_an_offdm1 = a_an[iA_offdm1];
      a_an_offdp1 = a_an[iA_offdp1];
      a_as_offd = a_as[iA_offd];
      a_as_offdp1 = a_as[iA_offdp1];
      a_aw_offd = a_aw[iA_offd];
      a_aw_offdp1 = a_aw[iA_offdp1];
      a_ae_offd = a_ae[iA_offd];
      a_ae_offdm1 = a_ae[iA_offdm1];
      a_ae_offdp1 = a_ae[iA_offdp1];
      a_be_offd = a_be[iA_offd];
      a_be_offdp1 = a_be[iA_offdp1];
      a_bn_offd = a_bn[iA_offd];
      a_bn_offdp1 = a_bn[iA_offdp1];

      hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         iAm1 = iA - zOffsetA_diag;
         iAp1 = iA + zOffsetA_diag;

         iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
         rap_ane[iAc] = ra[iR] * a_cne_offdp1 * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP;
         rap_an[iAc] = ra[iR] * a_cn_offdp1 * pb[iP1]
            + ra[iR] * a_an_offdp1
            + a_an_offd * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
         rap_anw[iAc] = ra[iR] * a_cnw_offdp1 * pb[iP1];

         iP1 = iP + zOffsetP + xOffsetP;
         rap_ae[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1]
            + ra[iR] * a_ae_offdp1
            + a_ae_offd * pb[iP1];

         iP1 = iP + zOffsetP;
         rap_ac[iAc] = a_ac_offd * pb[iP1]
            + ra[iR] * a_cc[iAp1] * pb[iP1]
            + ra[iR] * a_ac_offdp1;

         iP1 = iP + zOffsetP - xOffsetP;
         rap_aw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1]
            + ra[iR] * a_aw_offdp1
            + a_aw_offd * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
         rap_ase[iAc] = ra[iR] * a_cse_offdp1 * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP;
         rap_as[iAc] = ra[iR] * a_cs_offdp1 * pb[iP1]
            + ra[iR] * a_as_offdp1
            + a_as_offd * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
         rap_asw[iAc] = ra[iR] * a_csw_offdp1 * pb[iP1];

         iP1 = iP + yOffsetP + xOffsetP;
         rap_cne[iAc] = a_cne_offd
            + rb[iR] * a_cne_offdm1 * pb[iP1]
            + ra[iR] * a_cne_offdp1 * pa[iP1];

         iP1 = iP + yOffsetP;
         rap_cn[iAc] = a_cn_offd
            + rb[iR] * a_cn_offdm1 * pb[iP1]
            + ra[iR] * a_cn_offdp1 * pa[iP1]
            + a_bn_offd * pb[iP1]
            + a_an_offd * pa[iP1]
            + rb[iR] * a_an_offdm1
            + ra[iR] * a_bn_offdp1;

         iP1 = iP + yOffsetP - xOffsetP;
         rap_cnw[iAc] = a_cnw_offd
            + rb[iR] * a_cnw_offdm1 * pb[iP1]
            + ra[iR] * a_cnw_offdp1 * pa[iP1];

         iP1 = iP + xOffsetP;
         rap_ce[iAc] = a_ce_offd
            + rb[iR] * a_ce_offdm1 * pb[iP1]
            + ra[iR] * a_ce_offdp1 * pa[iP1]
            + a_be_offd * pb[iP1]
            + a_ae_offd * pa[iP1]
            + rb[iR] * a_ae_offdm1
            + ra[iR] * a_be_offdp1;
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }

/* }*/ /* end ForBoxI */

   return hypre_error_flag;
}

/* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of
   fine_stencil_size (19) and one value of constant_coefficient (1).
*/ HYPRE_Int hypre_PFMG3BuildRAPNoSym_onebox_FSS19_CC1( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index fstart; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_ac, *a_aw, *a_ae, *a_as, *a_an; HYPRE_Real *a_be, *a_bn; HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne; HYPRE_Real *rap_ce, *rap_cn; HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an; HYPRE_Real *rap_cnw, *rap_cne; HYPRE_Real *rap_asw, *rap_ase, *rap_anw, *rap_ane; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int zOffsetA; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; HYPRE_Int zOffsetP; cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ 
hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 7-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient in same plane * a_ce is pointer for east coefficient in same plane * a_cs is pointer for south coefficient in same plane * a_cn is pointer for north coefficient in same plane * a_ac is pointer for center coefficient in plane above * a_bc is pointer for center coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 19-point fine grid operator: * * a_aw is pointer for west coefficient in plane above * a_ae is pointer for east coefficient in plane above * a_as is pointer for south coefficient in plane above * a_an is pointer for north coefficient in plane above * a_bw is pointer for west 
coefficient in plane below * a_be is pointer for east coefficient in plane below * a_bs is pointer for south coefficient in plane below * a_bn is pointer for north coefficient in plane below * a_csw is pointer for southwest coefficient in same plane * a_cse is pointer for southeast coefficient in same plane * a_cnw is pointer for northwest coefficient in same plane * a_cne is pointer for northeast coefficient in same plane *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,1); MapIndex(index_temp, cdir, index); a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,1); MapIndex(index_temp, cdir, index); a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for 19-point coarse grid operator: * * We build only the upper triangular part 
(excluding diagonal). * * rap_ce is pointer for east coefficient in same plane (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,0,1); MapIndex(index_temp, cdir, index); rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,1); MapIndex(index_temp, cdir, index); rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point coarse grid operator: * * A 27-point coarse grid operator is produced when the fine grid * stencil is 19 or 27 point. * * We build only the upper triangular part. * * rap_cnw is pointer for northwest coefficient in same plane (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,1); MapIndex(index_temp, cdir, index); rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,1); MapIndex(index_temp, cdir, index); rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,1); MapIndex(index_temp, cdir, index); rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,1); MapIndex(index_temp, cdir, index); rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); zOffsetA = 0; zOffsetP = 0; hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*----------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for 19-point fine grid operator; produces upper triangular * part of 27-point coarse grid operator. stencil entries: * (above-northeast, above-north, above-northwest, above-east, * above-center, above-west, above-southeast, above-south, * above-southwest, center-northeast, center-north, * center-northwest, and center-east). 
*--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - zOffsetA; iAp1 = iA + zOffsetA; iP1 = iP + zOffsetP + yOffsetP + xOffsetP; rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1]; iP1 = iP + zOffsetP + yOffsetP; rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1] + ra[iR] * a_an[iAp1] + a_an[iA] * pb[iP1]; iP1 = iP + zOffsetP + yOffsetP - xOffsetP; rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1]; iP1 = iP + zOffsetP + xOffsetP; rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1] + ra[iR] * a_ae[iAp1] + a_ae[iA] * pb[iP1]; iP1 = iP + zOffsetP; rap_ac[iAc] = a_ac[iA] * pb[iP1] + ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_ac[iAp1]; iP1 = iP + zOffsetP - xOffsetP; rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1] + ra[iR] * a_aw[iAp1] + a_aw[iA] * pb[iP1]; iP1 = iP + zOffsetP - yOffsetP + xOffsetP; rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1]; iP1 = iP + zOffsetP - yOffsetP; rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1] + ra[iR] * a_as[iAp1] + a_as[iA] * pb[iP1]; iP1 = iP + zOffsetP - yOffsetP - xOffsetP; rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1]; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = a_cne[iA] + rb[iR] * a_cne[iAm1] * pb[iP1] + ra[iR] * a_cne[iAp1] * pa[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = a_cn[iA] + rb[iR] * a_cn[iAm1] * pb[iP1] + ra[iR] * a_cn[iAp1] * pa[iP1] + a_bn[iA] * pb[iP1] + a_an[iA] * pa[iP1] + rb[iR] * a_an[iAm1] + ra[iR] * a_bn[iAp1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = a_cnw[iA] + rb[iR] * a_cnw[iAm1] * pb[iP1] + ra[iR] * a_cnw[iAp1] * pa[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce[iA] + rb[iR] * a_ce[iAm1] * pb[iP1] + ra[iR] * a_ce[iAp1] * pa[iP1] + a_be[iA] * pb[iP1] + a_ae[iA] * pa[iP1] + rb[iR] * a_ae[iAm1] + ra[iR] * a_be[iAp1]; /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of fine_stencil_size (27) and one value of constant_coefficient (0). 
*/

/*--------------------------------------------------------------------------
 * Builds, for one coarse/fine box pair (ci/fi), the upper-triangular
 * (non-symmetric) entries of the 27-point coarse-grid operator
 * RAP = R * A * P, specialized to a 27-point fine-grid stencil (FSS27)
 * with variable coefficients in A (CC0).
 *
 *   ci, fi   - coarse and fine box indices into the matrix data spaces
 *   A        - fine-grid operator (27-point stencil)
 *   P, R     - interpolation and restriction operators
 *   cdir     - coarsening direction; MapIndex permutes stencil indices so
 *              the "z" direction below is the semicoarsened direction
 *   cindex   - index of the first coarse grid point in the fine index space
 *   cstride  - fine-grid stride between coarse grid points
 *   RAP      - output coarse operator; only upper stencil entries written
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC0(
   HYPRE_Int ci, HYPRE_Int fi,
   hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R,
   HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride,
   hypre_StructMatrix *RAP )
{
   hypre_Index index;
   hypre_Index index_temp;

   hypre_StructGrid *cgrid;
   hypre_BoxArray *cgrid_boxes;
   hypre_Box *cgrid_box;
   hypre_IndexRef cstart;
   hypre_Index stridec;
   hypre_Index fstart;
   hypre_IndexRef stridef;
   hypre_Index loop_size;

   HYPRE_Int constant_coefficient_A;

   hypre_Box *A_dbox;
   hypre_Box *P_dbox;
   hypre_Box *R_dbox;
   hypre_Box *RAP_dbox;

   /* Interpolation/restriction weights: pa/ra for the f-point above the
      c-point, pb/rb for the f-point below. */
   HYPRE_Real *pa, *pb;
   HYPRE_Real *ra, *rb;

   /* Fine-grid operator coefficients (c=center plane, a=plane above,
      b=plane below; compass letters within a plane). */
   HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   HYPRE_Real *a_ac, *a_aw, *a_ae, *a_as, *a_an;
   HYPRE_Real *a_be, *a_bn;
   HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne;
   HYPRE_Real *a_asw, *a_ase, *a_anw, *a_ane;
   HYPRE_Real *a_bnw, *a_bne;

   /* Scalar snapshots of A's coefficients, used when A has constant
      off-diagonal coefficients; _offdm1/_offdp1 are values one plane
      below/above (with zOffsetA_offd == 0 these alias index 0). */
   HYPRE_Real a_cn_offd, a_cn_offdm1, a_cn_offdp1;
   HYPRE_Real a_ce_offd, a_ce_offdm1, a_ce_offdp1;
   HYPRE_Real a_cs_offdp1, a_cw_offdp1, a_cse_offdp1, a_csw_offdp1;
   HYPRE_Real a_cne_offd, a_cne_offdm1, a_cne_offdp1;
   HYPRE_Real a_cnw_offd, a_cnw_offdm1, a_cnw_offdp1;
   HYPRE_Real a_ac_offd, a_ac_offdp1;
   HYPRE_Real a_an_offd, a_an_offdm1, a_an_offdp1;
   HYPRE_Real a_ane_offd, a_ane_offdm1, a_ane_offdp1;
   HYPRE_Real a_anw_offd, a_anw_offdm1, a_anw_offdp1;
   HYPRE_Real a_as_offd, a_as_offdp1;
   HYPRE_Real a_ase_offd, a_ase_offdp1, a_asw_offd, a_asw_offdp1;
   HYPRE_Real a_aw_offd, a_aw_offdp1;
   HYPRE_Real a_ae_offd, a_ae_offdm1, a_ae_offdp1;
   HYPRE_Real a_be_offd, a_be_offdp1;
   HYPRE_Real a_bn_offd, a_bn_offdp1;
   HYPRE_Real a_bne_offd, a_bne_offdp1, a_bnw_offd, a_bnw_offdp1;

   /* Coarse-grid operator entries (upper triangular part only). */
   HYPRE_Real *rap_ce, *rap_cn;
   HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
   HYPRE_Real *rap_cnw, *rap_cne;
   HYPRE_Real *rap_asw, *rap_ase, *rap_anw, *rap_ane;

   HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1;
   HYPRE_Int iAc;
   HYPRE_Int iP, iP1;
   HYPRE_Int iR;

   HYPRE_Int zOffsetA;
   HYPRE_Int zOffsetA_diag;
   HYPRE_Int zOffsetA_offd;
   HYPRE_Int xOffsetP;
   HYPRE_Int yOffsetP;
   HYPRE_Int zOffsetP;

   stridef = cstride;
   hypre_SetIndex3(stridec, 1, 1, 1);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A);

   /* Remnant of the original per-box loop; this routine now handles a
      single, caller-selected box pair:
      fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } */

   cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);

   cstart = hypre_BoxIMin(cgrid_box);
   hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

   A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
   P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
   R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi);
   RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

   /*-----------------------------------------------------------------
    * Extract pointers for interpolation operator:
    * pa is pointer for weight for f-point above c-point
    * pb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   /* NOTE(review): pb is rebased back by one plane via BoxOffsetDistance,
      unlike the CC1 variant of this routine — presumably to align with the
      BoxLoop's iP indexing; confirm against the symmetric RAP builder. */
   pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for restriction operator:
    * ra is pointer for weight for f-point above c-point
    * rb is pointer for weight for f-point below c-point
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,-1);
   MapIndex(index_temp, cdir, index);
   ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   /* Same one-plane rebase as pb above. */
   rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 7-point fine grid operator:
    *
    * a_cc is pointer for center coefficient
    * a_cw is pointer for west coefficient in same plane
    * a_ce is pointer for east coefficient in same plane
    * a_cs is pointer for south coefficient in same plane
    * a_cn is pointer for north coefficient in same plane
    * a_ac is pointer for center coefficient in plane above
    * a_bc is pointer for center coefficient in plane below
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,0);
   MapIndex(index_temp, cdir, index);
   a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,0,0);
   MapIndex(index_temp, cdir, index);
   a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract additional pointers for 19-point fine grid operator:
    *
    * a_aw is pointer for west coefficient in plane above
    * a_ae is pointer for east coefficient in plane above
    * a_as is pointer for south coefficient in plane above
    * a_an is pointer for north coefficient in plane above
    * a_bw is pointer for west coefficient in plane below
    * a_be is pointer for east coefficient in plane below
    * a_bs is pointer for south coefficient in plane below
    * a_bn is pointer for north coefficient in plane below
    * a_csw is pointer for southwest coefficient in same plane
    * a_cse is pointer for southeast coefficient in same plane
    * a_cnw is pointer for northwest coefficient in same plane
    * a_cne is pointer for northeast coefficient in same plane
    *
    * (Only the entries needed for the upper-triangular RAP part are
    * actually extracted below.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,-1,0,1);
   MapIndex(index_temp, cdir, index);
   a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,1);
   MapIndex(index_temp, cdir, index);
   a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,-1,1);
   MapIndex(index_temp, cdir, index);
   a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,1);
   MapIndex(index_temp, cdir, index);
   a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,0,-1);
   MapIndex(index_temp, cdir, index);
   a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,0,1,-1);
   MapIndex(index_temp, cdir, index);
   a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,-1,0);
   MapIndex(index_temp, cdir, index);
   a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,-1,0);
   MapIndex(index_temp, cdir, index);
   a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,1,0);
   MapIndex(index_temp, cdir, index);
   a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,1,0);
   MapIndex(index_temp, cdir, index);
   a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract additional pointers for 27-point fine grid operator:
    *
    * a_asw is pointer for southwest coefficient in plane above
    * a_ase is pointer for southeast coefficient in plane above
    * a_anw is pointer for northwest coefficient in plane above
    * a_ane is pointer for northeast coefficient in plane above
    * a_bsw is pointer for southwest coefficient in plane below
    * a_bse is pointer for southeast coefficient in plane below
    * a_bnw is pointer for northwest coefficient in plane below
    * a_bne is pointer for northeast coefficient in plane below
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,-1,-1,1);
   MapIndex(index_temp, cdir, index);
   a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,-1,1);
   MapIndex(index_temp, cdir, index);
   a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,1,1);
   MapIndex(index_temp, cdir, index);
   a_anw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,1,1);
   MapIndex(index_temp, cdir, index);
   a_ane = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,-1,1,-1);
   MapIndex(index_temp, cdir, index);
   a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   hypre_SetIndex3(index_temp,1,1,-1);
   MapIndex(index_temp, cdir, index);
   a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

   /*-----------------------------------------------------------------
    * Extract pointers for 19-point coarse grid operator:
    *
    * We build only the upper triangular part (excluding diagonal).
    *
    * rap_ce is pointer for east coefficient in same plane (etc.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);
   rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,0,1);
   MapIndex(index_temp, cdir, index);
   rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,0,1);
   MapIndex(index_temp, cdir, index);
   rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,-1,1);
   MapIndex(index_temp, cdir, index);
   rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,0,1,1);
   MapIndex(index_temp, cdir, index);
   rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,1,0);
   MapIndex(index_temp, cdir, index);
   rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Extract additional pointers for 27-point coarse grid operator:
    *
    * A 27-point coarse grid operator is produced when the fine grid
    * stencil is 19 or 27 point.
    *
    * We build only the upper triangular part.
    *
    * rap_cnw is pointer for northwest coefficient in same plane (etc.)
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,-1,-1,1);
   MapIndex(index_temp, cdir, index);
   rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,-1,1);
   MapIndex(index_temp, cdir, index);
   rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,-1,1,1);
   MapIndex(index_temp, cdir, index);
   rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   hypre_SetIndex3(index_temp,1,1,1);
   MapIndex(index_temp, cdir, index);
   rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

   /*-----------------------------------------------------------------
    * Define offsets for fine grid stencil and interpolation
    *
    * In the BoxLoop below I assume iA and iP refer to data associated
    * with the point which we are building the stencil for. The below
    * Offsets are used in referring to data associated with other points.
    *-----------------------------------------------------------------*/

   hypre_SetIndex3(index_temp,0,0,1);
   MapIndex(index_temp, cdir, index);

   zOffsetP = hypre_BoxOffsetDistance(P_dbox,index);
   if ( constant_coefficient_A == 0 )
   {
      zOffsetA = hypre_BoxOffsetDistance(A_dbox,index);
   }
   else
   {
      /* Constant off-diagonal coefficients: the diagonal is still stored
         per point (zOffsetA_diag), while off-diagonal entries are single
         values, hence a zero offset. */
      zOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index);
      zOffsetA_offd = 0;
   }

   hypre_SetIndex3(index_temp,0,1,0);
   MapIndex(index_temp, cdir, index);
   yOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

   hypre_SetIndex3(index_temp,1,0,0);
   MapIndex(index_temp, cdir, index);
   xOffsetP = hypre_BoxOffsetDistance(P_dbox,index);

   /*-----------------------------------------------------------------
    * (Historical note: the caller originally used a switch statement to
    * direct control to the appropriate BoxLoop depending on stencil
    * size; this routine is the specialized 27-point branch.)
    *-----------------------------------------------------------------*/

   /*--------------------------------------------------------------
    * Loop for 27-point fine grid operator; produces upper triangular
    * part of 27-point coarse grid operator. stencil entries:
    * (above-northeast, above-north, above-northwest, above-east,
    * above-center, above-west, above-southeast, above-south,
    * above-southwest, center-northeast, center-north,
    * center-northwest, and center-east).
    *--------------------------------------------------------------*/

   hypre_BoxGetSize(cgrid_box, loop_size);

   if ( constant_coefficient_A == 0 )
   {
      /* Fully variable coefficients: index A per fine-grid point. */
      hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         iAm1 = iA - zOffsetA;
         iAp1 = iA + zOffsetA;

         iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
         rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1]
                        + ra[iR] * a_ane[iAp1]
                        + a_ane[iA] * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP;
         rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1]
                       + ra[iR] * a_an[iAp1]
                       + a_an[iA] * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
         rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1]
                        + ra[iR] * a_anw[iAp1]
                        + a_anw[iA] * pb[iP1];

         iP1 = iP + zOffsetP + xOffsetP;
         rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]
                       + ra[iR] * a_ae[iAp1]
                       + a_ae[iA] * pb[iP1];

         iP1 = iP + zOffsetP;
         rap_ac[iAc] = a_ac[iA] * pb[iP1]
                       + ra[iR] * a_cc[iAp1] * pb[iP1]
                       + ra[iR] * a_ac[iAp1];

         iP1 = iP + zOffsetP - xOffsetP;
         rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]
                       + ra[iR] * a_aw[iAp1]
                       + a_aw[iA] * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
         rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1]
                        + ra[iR] * a_ase[iAp1]
                        + a_ase[iA] * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP;
         rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1]
                       + ra[iR] * a_as[iAp1]
                       + a_as[iA] * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
         rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1]
                        + ra[iR] * a_asw[iAp1]
                        + a_asw[iA] * pb[iP1];

         iP1 = iP + yOffsetP + xOffsetP;
         rap_cne[iAc] = a_cne[iA]
                        + rb[iR] * a_cne[iAm1] * pb[iP1]
                        + ra[iR] * a_cne[iAp1] * pa[iP1]
                        + a_bne[iA] * pb[iP1]
                        + a_ane[iA] * pa[iP1]
                        + rb[iR] * a_ane[iAm1]
                        + ra[iR] * a_bne[iAp1];

         iP1 = iP + yOffsetP;
         rap_cn[iAc] = a_cn[iA]
                       + rb[iR] * a_cn[iAm1] * pb[iP1]
                       + ra[iR] * a_cn[iAp1] * pa[iP1]
                       + a_bn[iA] * pb[iP1]
                       + a_an[iA] * pa[iP1]
                       + rb[iR] * a_an[iAm1]
                       + ra[iR] * a_bn[iAp1];

         iP1 = iP + yOffsetP - xOffsetP;
         rap_cnw[iAc] = a_cnw[iA]
                        + rb[iR] * a_cnw[iAm1] * pb[iP1]
                        + ra[iR] * a_cnw[iAp1] * pa[iP1]
                        + a_bnw[iA] * pb[iP1]
                        + a_anw[iA] * pa[iP1]
                        + rb[iR] * a_anw[iAm1]
                        + ra[iR] * a_bnw[iAp1];

         iP1 = iP + xOffsetP;
         rap_ce[iAc] = a_ce[iA]
                       + rb[iR] * a_ce[iAm1] * pb[iP1]
                       + ra[iR] * a_ce[iAp1] * pa[iP1]
                       + a_be[iA] * pb[iP1]
                       + a_ae[iA] * pa[iP1]
                       + rb[iR] * a_ae[iAm1]
                       + ra[iR] * a_be[iAp1];
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }
   else
   {
      /* A's off-diagonal coefficients are constant: hoist them out of the
         BoxLoop as scalars (iA_offd* are all 0 since zOffsetA_offd == 0). */
      iA_offd = 0;
      iA_offdm1 = iA_offd - zOffsetA_offd;
      iA_offdp1 = iA_offd + zOffsetA_offd;
      a_cn_offd = a_cn[iA_offd]; a_cn_offdm1 = a_cn[iA_offdm1];
      a_cn_offdp1 = a_cn[iA_offdp1];
      a_cne_offd = a_cne[iA_offd]; a_cne_offdm1 = a_cne[iA_offdm1];
      a_cne_offdp1 = a_cne[iA_offdp1];
      a_cnw_offd = a_cnw[iA_offd]; a_cnw_offdm1 = a_cnw[iA_offdm1];
      a_cnw_offdp1 = a_cnw[iA_offdp1];
      a_ce_offd = a_ce[iA_offd]; a_ce_offdm1 = a_ce[iA_offdm1];
      a_ce_offdp1 = a_ce[iA_offdp1];
      a_cs_offdp1 = a_cs[iA_offdp1];
      a_cse_offdp1 = a_cse[iA_offdp1];
      a_csw_offdp1 = a_csw[iA_offdp1];
      a_cw_offdp1 = a_cw[iA_offdp1];
      a_ac_offd = a_ac[iA_offd]; a_ac_offdp1 = a_ac[iA_offdp1];
      a_an_offd = a_an[iA_offd]; a_an_offdm1 = a_an[iA_offdm1];
      a_an_offdp1 = a_an[iA_offdp1];
      a_ane_offd = a_ane[iA_offd]; a_ane_offdm1 = a_ane[iA_offdm1];
      a_ane_offdp1 = a_ane[iA_offdp1];
      a_anw_offd = a_anw[iA_offd]; a_anw_offdm1 = a_anw[iA_offdm1];
      a_anw_offdp1 = a_anw[iA_offdp1];
      a_ae_offd = a_ae[iA_offd]; a_ae_offdm1 = a_ae[iA_offdm1];
      a_ae_offdp1 = a_ae[iA_offdp1];
      a_aw_offd = a_aw[iA_offd]; a_aw_offdp1 = a_aw[iA_offdp1];
      a_as_offd = a_as[iA_offd]; a_as_offdp1 = a_as[iA_offdp1];
      a_ase_offd = a_ase[iA_offd]; a_ase_offdp1 = a_ase[iA_offdp1];
      a_asw_offd = a_asw[iA_offd]; a_asw_offdp1 = a_asw[iA_offdp1];
      a_bn_offd = a_bn[iA_offd]; a_bn_offdp1 = a_bn[iA_offdp1];
      a_bne_offd = a_bne[iA_offd]; a_bne_offdp1 = a_bne[iA_offdp1];
      a_bnw_offd = a_bnw[iA_offd]; a_bnw_offdp1 = a_bnw[iA_offdp1];
      a_be_offd = a_be[iA_offd]; a_be_offdp1 = a_be[iA_offdp1];

      hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size,
                          P_dbox, cstart, stridec, iP,
                          R_dbox, cstart, stridec, iR,
                          A_dbox, fstart, stridef, iA,
                          RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop4For(iP, iR, iA, iAc)
      {
         /* Only the (variable) diagonal a_cc is indexed per point. */
         iAm1 = iA - zOffsetA_diag;
         iAp1 = iA + zOffsetA_diag;

         iP1 = iP + zOffsetP + yOffsetP + xOffsetP;
         rap_ane[iAc] = ra[iR] * a_cne_offdp1 * pb[iP1]
                        + ra[iR] * a_ane_offdp1
                        + a_ane_offd * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP;
         rap_an[iAc] = ra[iR] * a_cn_offdp1 * pb[iP1]
                       + ra[iR] * a_an_offdp1
                       + a_an_offd * pb[iP1];

         iP1 = iP + zOffsetP + yOffsetP - xOffsetP;
         rap_anw[iAc] = ra[iR] * a_cnw_offdp1 * pb[iP1]
                        + ra[iR] * a_anw_offdp1
                        + a_anw_offd * pb[iP1];

         iP1 = iP + zOffsetP + xOffsetP;
         rap_ae[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1]
                       + ra[iR] * a_ae_offdp1
                       + a_ae_offd * pb[iP1];

         iP1 = iP + zOffsetP;
         rap_ac[iAc] = a_ac_offd * pb[iP1]
                       + ra[iR] * a_cc[iAp1] * pb[iP1]
                       + ra[iR] * a_ac_offdp1;

         iP1 = iP + zOffsetP - xOffsetP;
         rap_aw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1]
                       + ra[iR] * a_aw_offdp1
                       + a_aw_offd * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP + xOffsetP;
         rap_ase[iAc] = ra[iR] * a_cse_offdp1 * pb[iP1]
                        + ra[iR] * a_ase_offdp1
                        + a_ase_offd * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP;
         rap_as[iAc] = ra[iR] * a_cs_offdp1 * pb[iP1]
                       + ra[iR] * a_as_offdp1
                       + a_as_offd * pb[iP1];

         iP1 = iP + zOffsetP - yOffsetP - xOffsetP;
         rap_asw[iAc] = ra[iR] * a_csw_offdp1 * pb[iP1]
                        + ra[iR] * a_asw_offdp1
                        + a_asw_offd * pb[iP1];

         iP1 = iP + yOffsetP + xOffsetP;
         rap_cne[iAc] = a_cne_offd
                        + rb[iR] * a_cne_offdm1 * pb[iP1]
                        + ra[iR] * a_cne_offdp1 * pa[iP1]
                        + a_bne_offd * pb[iP1]
                        + a_ane_offd * pa[iP1]
                        + rb[iR] * a_ane_offdm1
                        + ra[iR] * a_bne_offdp1;

         iP1 = iP + yOffsetP;
         rap_cn[iAc] = a_cn_offd
                       + rb[iR] * a_cn_offdm1 * pb[iP1]
                       + ra[iR] * a_cn_offdp1 * pa[iP1]
                       + a_bn_offd * pb[iP1]
                       + a_an_offd * pa[iP1]
                       + rb[iR] * a_an_offdm1
                       + ra[iR] * a_bn_offdp1;

         iP1 = iP + yOffsetP - xOffsetP;
         rap_cnw[iAc] = a_cnw_offd
                        + rb[iR] * a_cnw_offdm1 * pb[iP1]
                        + ra[iR] * a_cnw_offdp1 * pa[iP1]
                        + a_bnw_offd * pb[iP1]
                        + a_anw_offd * pa[iP1]
                        + rb[iR] * a_anw_offdm1
                        + ra[iR] * a_bnw_offdp1;

         iP1 = iP + xOffsetP;
         rap_ce[iAc] = a_ce_offd
                       + rb[iR] * a_ce_offdm1 * pb[iP1]
                       + ra[iR] * a_ce_offdp1 * pa[iP1]
                       + a_be_offd * pb[iP1]
                       + a_ae_offd * pa[iP1]
                       + rb[iR] * a_ae_offdm1
                       + ra[iR] * a_be_offdp1;
      }
      hypre_BoxLoop4End(iP, iR, iA, iAc);
   }

   /* }*/ /* end ForBoxI */

   return hypre_error_flag;
}

/* core part of hypre_PFMG3BuildRAPNoSym, for one box, one value of
   fine_stencil_size (27) and one value of constant_coefficient (1). */

HYPRE_Int
hypre_PFMG3BuildRAPNoSym_onebox_FSS27_CC1(
   HYPRE_Int ci, HYPRE_Int fi,
   hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R,
   HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride,
   hypre_StructMatrix *RAP )
{
   hypre_Index index;
   hypre_Index index_temp;
   hypre_StructGrid *cgrid;
   hypre_BoxArray *cgrid_boxes;
   hypre_Box *cgrid_box;
   hypre_IndexRef cstart;
   hypre_Index fstart;
   HYPRE_Real *pa, *pb;
   HYPRE_Real *ra, *rb;
   HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn;
   HYPRE_Real *a_ac, *a_aw, *a_ae, *a_as, *a_an;
   HYPRE_Real *a_be, *a_bn;
   HYPRE_Real *a_csw, *a_cse, *a_cnw, *a_cne;
   HYPRE_Real *a_asw, *a_ase, *a_anw, *a_ane;
   HYPRE_Real *a_bnw, *a_bne;
   HYPRE_Real *rap_ce, *rap_cn;
   HYPRE_Real *rap_ac, *rap_aw, *rap_ae, *rap_as, *rap_an;
   HYPRE_Real *rap_cnw, *rap_cne;
   HYPRE_Real *rap_asw, *rap_ase, *rap_anw, *rap_ane;
   HYPRE_Int iA, iAm1, iAp1;
   HYPRE_Int iAc;
   HYPRE_Int iP, iP1;
   HYPRE_Int iR;
   HYPRE_Int zOffsetA;
   HYPRE_Int xOffsetP;
   HYPRE_Int yOffsetP;
   HYPRE_Int zOffsetP;

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);

   cgrid_box =
hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,-1); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 7-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient in same plane * a_ce is pointer for east coefficient in same plane * a_cs is pointer for south coefficient in same plane * a_cn is pointer for north coefficient in same plane * a_ac is pointer for center coefficient in plane above * a_bc is pointer for center coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = 
hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); a_ac = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 19-point fine grid operator: * * a_aw is pointer for west coefficient in plane above * a_ae is pointer for east coefficient in plane above * a_as is pointer for south coefficient in plane above * a_an is pointer for north coefficient in plane above * a_bw is pointer for west coefficient in plane below * a_be is pointer for east coefficient in plane below * a_bs is pointer for south coefficient in plane below * a_bn is pointer for north coefficient in plane below * a_csw is pointer for southwest coefficient in same plane * a_cse is pointer for southeast coefficient in same plane * a_cnw is pointer for northwest coefficient in same plane * a_cne is pointer for northeast coefficient in same plane *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); a_aw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,1); MapIndex(index_temp, cdir, index); a_ae = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); a_as = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,1); MapIndex(index_temp, cdir, index); a_an = hypre_StructMatrixExtractPointerByIndex(A, fi, index); 
hypre_SetIndex3(index_temp,1,0,-1); MapIndex(index_temp, cdir, index); a_be = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,-1); MapIndex(index_temp, cdir, index); a_bn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point fine grid operator: * * a_asw is pointer for southwest coefficient in plane above * a_ase is pointer for southeast coefficient in plane above * a_anw is pointer for northwest coefficient in plane above * a_ane is pointer for northeast coefficient in plane above * a_bsw is pointer for southwest coefficient in plane below * a_bse is pointer for southeast coefficient in plane below * a_bnw is pointer for northwest coefficient in plane below * a_bne is pointer for northeast coefficient in plane below *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,1); MapIndex(index_temp, cdir, index); a_asw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,1); MapIndex(index_temp, cdir, index); a_ase = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,1); MapIndex(index_temp, cdir, index); a_anw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,1); MapIndex(index_temp, cdir, index); a_ane = 
hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,-1); MapIndex(index_temp, cdir, index); a_bnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,-1); MapIndex(index_temp, cdir, index); a_bne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for 19-point coarse grid operator: * * We build only the upper triangular part (excluding diagonal). * * rap_ce is pointer for east coefficient in same plane (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); rap_ac = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,1); MapIndex(index_temp, cdir, index); rap_aw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,0,1); MapIndex(index_temp, cdir, index); rap_ae = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,1); MapIndex(index_temp, cdir, index); rap_as = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,1); MapIndex(index_temp, cdir, index); rap_an = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Extract additional pointers for 27-point coarse grid 
operator: * * A 27-point coarse grid operator is produced when the fine grid * stencil is 19 or 27 point. * * We build only the upper triangular part. * * rap_cnw is pointer for northwest coefficient in same plane (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,1); MapIndex(index_temp, cdir, index); rap_asw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,1); MapIndex(index_temp, cdir, index); rap_ase = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,1); MapIndex(index_temp, cdir, index); rap_anw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,1); MapIndex(index_temp, cdir, index); rap_ane = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,1); MapIndex(index_temp, cdir, index); zOffsetA = 0; zOffsetP = 0; hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*----------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for 27-point fine grid operator; produces upper triangular * part of 27-point coarse grid operator. 
stencil entries: * (above-northeast, above-north, above-northwest, above-east, * above-center, above-west, above-southeast, above-south, * above-southwest, center-northeast, center-north, * center-northwest, and center-east). *--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - zOffsetA; iAp1 = iA + zOffsetA; iP1 = iP + zOffsetP + yOffsetP + xOffsetP; rap_ane[iAc] = ra[iR] * a_cne[iAp1] * pb[iP1] + ra[iR] * a_ane[iAp1] + a_ane[iA] * pb[iP1]; iP1 = iP + zOffsetP + yOffsetP; rap_an[iAc] = ra[iR] * a_cn[iAp1] * pb[iP1] + ra[iR] * a_an[iAp1] + a_an[iA] * pb[iP1]; iP1 = iP + zOffsetP + yOffsetP - xOffsetP; rap_anw[iAc] = ra[iR] * a_cnw[iAp1] * pb[iP1] + ra[iR] * a_anw[iAp1] + a_anw[iA] * pb[iP1]; iP1 = iP + zOffsetP + xOffsetP; rap_ae[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1] + ra[iR] * a_ae[iAp1] + a_ae[iA] * pb[iP1]; iP1 = iP + zOffsetP; rap_ac[iAc] = a_ac[iA] * pb[iP1] + ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_ac[iAp1]; iP1 = iP + zOffsetP - xOffsetP; rap_aw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1] + ra[iR] * a_aw[iAp1] + a_aw[iA] * pb[iP1]; iP1 = iP + zOffsetP - yOffsetP + xOffsetP; rap_ase[iAc] = ra[iR] * a_cse[iAp1] * pb[iP1] + ra[iR] * a_ase[iAp1] + a_ase[iA] * pb[iP1]; iP1 = iP + zOffsetP - yOffsetP; rap_as[iAc] = ra[iR] * a_cs[iAp1] * pb[iP1] + ra[iR] * a_as[iAp1] + a_as[iA] * pb[iP1]; iP1 = iP + zOffsetP - yOffsetP - xOffsetP; rap_asw[iAc] = ra[iR] * a_csw[iAp1] * pb[iP1] + ra[iR] * a_asw[iAp1] + a_asw[iA] * pb[iP1]; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = a_cne[iA] + rb[iR] * a_cne[iAm1] * pb[iP1] + ra[iR] * a_cne[iAp1] * pa[iP1] + a_bne[iA] * pb[iP1] + a_ane[iA] * pa[iP1] + rb[iR] * a_ane[iAm1] + ra[iR] * a_bne[iAp1]; iP1 = iP + yOffsetP; rap_cn[iAc] = a_cn[iA] + rb[iR] * a_cn[iAm1] * pb[iP1] + ra[iR] * a_cn[iAp1] * pa[iP1] + a_bn[iA] * pb[iP1] + a_an[iA] * pa[iP1] + rb[iR] * a_an[iAm1] + ra[iR] * a_bn[iAp1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = a_cnw[iA] + rb[iR] * a_cnw[iAm1] * pb[iP1] + 
ra[iR] * a_cnw[iAp1] * pa[iP1] + a_bnw[iA] * pb[iP1] + a_anw[iA] * pa[iP1] + rb[iR] * a_anw[iAm1] + ra[iR] * a_bnw[iAp1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce[iA] + rb[iR] * a_ce[iAm1] * pb[iP1] + ra[iR] * a_ce[iAp1] * pa[iP1] + a_be[iA] * pb[iP1] + a_ae[iA] * pa[iP1] + rb[iR] * a_ae[iAm1] + ra[iR] * a_be[iAp1]; /* }*/ /* end ForBoxI */ return hypre_error_flag; }
papi_hl.c
/****************************/
/* THIS IS OPEN SOURCE CODE */
/****************************/

/**
 * @file    papi_hl.c
 * @author  Frank Winkler
 *          frank.winkler@icl.utk.edu
 * @author  Philip Mucci
 *          mucci@cs.utk.edu
 * @brief This file contains the 'high level' interface to PAPI.
 * BASIC is a high level language. ;-)
 */

#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include <search.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <unistd.h>
#include "papi.h"
#include "papi_internal.h"

/* For dynamic linking to libpapi */
/* Weak symbol for pthread_once to avoid additional linking
 * against libpthread when not used. */
#pragma weak pthread_once

/* Print warnings/info only while verbosity is enabled (see PAPI_NO_WARNING). */
#define verbose_fprintf \
   if (verbosity == 1) fprintf

/* defaults for number of components and events */
#define PAPIHL_NUM_OF_COMPONENTS 10
#define PAPIHL_NUM_OF_EVENTS_PER_COMPONENT 10

#define PAPIHL_ACTIVE 1
#define PAPIHL_DEACTIVATED 0

/* global components data begin *****************************************/
/* One entry per PAPI component that contributes at least one event.
 * Arrays event_names/event_codes/event_types run in parallel and grow
 * on demand (see _internal_hl_add_event_to_component). */
typedef struct components
{
   int component_id;
   int num_of_events;
   int max_num_of_events;
   char **event_names;
   int *event_codes;
   short *event_types;       /* 1 = instant, 0 = delta (see create_components) */
   int EventSet;             //only for testing at initialization phase
} components_t;

components_t *components = NULL;
int num_of_components = 0;
int max_num_of_components = PAPIHL_NUM_OF_COMPONENTS;
int total_num_events = 0;
int num_of_cleaned_threads = 0;

/* global components data end *******************************************/


/* thread local components data begin ***********************************/
typedef struct local_components
{
   int EventSet;
   /** Return values for the eventsets */
   long_long *values;
} local_components_t;

PAPI_TLS_KEYWORD local_components_t *_local_components = NULL;
PAPI_TLS_KEYWORD long_long _local_cycles;
PAPI_TLS_KEYWORD volatile bool _local_state = PAPIHL_ACTIVE;
PAPI_TLS_KEYWORD int _local_region_begin_cnt = 0; /**< Count each PAPI_hl_region_begin call */
PAPI_TLS_KEYWORD int _local_region_end_cnt = 0;   /**< Count each PAPI_hl_region_end call */

/* thread local components data end *************************************/


/* global event storage data begin **************************************/
typedef struct reads
{
   struct reads *next;
   struct reads *prev;
   long_long value;        /**< Event value */
} reads_t;

typedef struct
{
   long_long offset;       /**< Event value for region_begin */
   long_long total;        /**< Event value for region_end - region_begin + previous value */
   reads_t *read_values;   /**< List of read event values inside a region */
} value_t;

typedef struct regions
{
   char *region;           /**< Region name */
   struct regions *next;
   struct regions *prev;
   value_t values[];       /**< Array of event values based on current eventset */
} regions_t;

typedef struct
{
   unsigned long key;      /**< Thread ID */
   regions_t *value;       /**< List of regions */
} threads_t;

/* Comparator for the tsearch/tfind thread tree: orders nodes by thread id.
 * NOTE(review): the unsigned long difference is truncated to int here; two
 * keys whose difference wraps the int range could compare incorrectly —
 * consider (a > b) - (a < b). TODO confirm against upstream. */
int compar(const void *l, const void *r)
{
   const threads_t *lm = l;
   const threads_t *lr = r;
   return lm->key - lr->key;
}

typedef struct
{
   void *root;             /**< Root of binary tree */
   threads_t *find_p;      /**< Pointer that is used for finding a thread node */
} binary_tree_t;

/**< Global binary tree that stores events from all threads */
binary_tree_t* binary_tree = NULL;

/* global event storage data end ****************************************/


/* global auxiliary variables begin *************************************/
enum region_type { REGION_BEGIN, REGION_READ, REGION_END };

char **requested_event_names = NULL;  /**< Events from user or default */
int num_of_requested_events = 0;

bool hl_initiated = false;     /**< Check PAPI-HL has been initiated */
bool hl_finalized = false;     /**< Check PAPI-HL has been finalized */
bool events_determined = false; /**< Check if events are determined */
bool output_generated = false; /**< Check if output has been already generated */
static char *absolute_output_file_path = NULL;
static int output_counter = 0; /**< Count each output generation. Not used yet */
short verbosity = 1;           /**< Verbose output is always generated */
bool state = PAPIHL_ACTIVE;    /**< PAPIHL is active until first error or finalization */
static int region_begin_cnt = 0; /**< Count each PAPI_hl_region_begin call */
static int region_end_cnt = 0;   /**< Count each PAPI_hl_region_end call */
unsigned long master_thread_id = -1; /**< Remember id of master thread */

/* global auxiliary variables end ***************************************/

static void _internal_hl_library_init(void);
static void _internal_hl_onetime_library_init(void);

/* functions for creating eventsets for different components */
static int _internal_hl_checkCounter ( char* counter );
static int _internal_hl_determine_rank();
static char *_internal_hl_remove_spaces( char *str );
static int _internal_hl_determine_default_events();
static int _internal_hl_read_user_events();
static int _internal_hl_new_component(int component_id, components_t *component);
static int _internal_hl_add_event_to_component(char *event_name, int event,
                                               short event_type, components_t *component);
static int _internal_hl_create_components();
static int _internal_hl_read_events(const char* events);
static int _internal_hl_create_event_sets();

/* functions for storing events */
static inline reads_t* _internal_hl_insert_read_node( reads_t** head_node );
static inline int _internal_hl_add_values_to_region( regions_t *node, enum region_type reg_typ );
static inline regions_t* _internal_hl_insert_region_node( regions_t** head_node, const char *region );
static inline regions_t* _internal_hl_find_region_node( regions_t* head_node, const char *region );
static inline threads_t* _internal_hl_insert_thread_node( unsigned long tid );
static inline threads_t* _internal_hl_find_thread_node( unsigned long tid );
static int _internal_hl_store_counters( unsigned long tid, const char *region,
                                        enum region_type reg_typ );
static int _internal_hl_read_counters();
static int _internal_hl_read_and_store_counters( const char *region, enum region_type reg_typ );
static int _internal_hl_create_global_binary_tree();

/* functions for output generation */
static int _internal_hl_mkdir(const char *dir);
static int _internal_hl_determine_output_path();
static void _internal_hl_json_line_break_and_indent(FILE* f, bool b, int width);
static void _internal_hl_json_region_events(FILE* f, bool beautifier, regions_t *regions);
static void _internal_hl_json_regions(FILE* f, bool beautifier, threads_t* thread_node);
static void _internal_hl_json_threads(FILE* f, bool beautifier, unsigned long* tids, int threads_num);
static void _internal_hl_write_output();

/* functions for cleaning up heap memory */
static void _internal_hl_clean_up_local_data();
static void _internal_hl_clean_up_global_data();
static void _internal_hl_clean_up_all(bool deactivate);
static int _internal_hl_check_for_clean_thread_states();

/* internal advanced functions */
int _internal_PAPI_hl_init();           /**< initialize high level library */
int _internal_PAPI_hl_cleanup_thread(); /**< clean local-thread event sets */
int _internal_PAPI_hl_finalize();       /**< shutdown event sets and clear up everything */
int _internal_PAPI_hl_set_events(const char* events); /**< set specific events to be recorded */
void _internal_PAPI_hl_print_output();  /**< generate output */

static void _internal_hl_library_init(void)
{
   /* This function is only called by one thread!
*/
   int retval;

   /* check VERBOSE level */
   if ( getenv("PAPI_NO_WARNING") != NULL ) {
      verbosity = 0;
   }

   if ( ( retval = PAPI_library_init(PAPI_VER_CURRENT) ) != PAPI_VER_CURRENT )
      verbose_fprintf(stdout, "PAPI-HL Error: PAPI_library_init failed!\n");

   /* PAPI_thread_init only succeeds if PAPI_library_init has succeeded */
   if ((retval = PAPI_thread_init(&pthread_self)) == PAPI_OK) {

      /* determine output directory and output file */
      if ( ( retval = _internal_hl_determine_output_path() ) != PAPI_OK ) {
         verbose_fprintf(stdout, "PAPI-HL Error: _internal_hl_determine_output_path failed!\n");
         state = PAPIHL_DEACTIVATED;
         verbose_fprintf(stdout, "PAPI-HL Error: PAPI could not be initiated!\n");
      } else {
         /* register the termination function for output */
         atexit(_internal_PAPI_hl_print_output);
         verbose_fprintf(stdout, "PAPI-HL Info: PAPI has been initiated!\n");

         /* remember thread id */
         master_thread_id = PAPI_thread_id();
         HLDBG("master_thread_id=%lu\n", master_thread_id);
      }
   } else {
      verbose_fprintf(stdout, "PAPI-HL Error: PAPI_thread_init failed!\n");
      state = PAPIHL_DEACTIVATED;
      verbose_fprintf(stdout, "PAPI-HL Error: PAPI could not be initiated!\n");
   }

   /* Support multiplexing if user wants to */
   if ( getenv("PAPI_MULTIPLEX") != NULL ) {
      retval = PAPI_multiplex_init();
      if ( retval == PAPI_ENOSUPP) {
         verbose_fprintf(stdout, "PAPI-HL Info: Multiplex is not supported!\n");
      } else if ( retval != PAPI_OK ) {
         verbose_fprintf(stdout, "PAPI-HL Error: PAPI_multiplex_init failed!\n");
      } else if ( retval == PAPI_OK ) {
         verbose_fprintf(stdout, "PAPI-HL Info: Multiplex has been initiated!\n");
      }
   }

   /* NOTE(review): hl_initiated is set even on the failure paths above;
    * callers appear to rely on 'state' to detect failure — confirm. */
   hl_initiated = true;
}

/* Run _internal_hl_library_init exactly once, whether or not the
 * application is linked against libpthread (pthread_once is a weak symbol). */
static void _internal_hl_onetime_library_init(void)
{
   static pthread_once_t library_is_initialized = PTHREAD_ONCE_INIT;
   if ( pthread_once ) {
      /* we assume that PAPI_hl_init() is called from a parallel region */
      pthread_once(&library_is_initialized, _internal_hl_library_init);
      /* wait until first thread has finished */
      int i = 0;
      /* give it 5 seconds in case PAPI_thread_init crashes */
      while ( !hl_initiated && (i++) < 500000 )
         usleep(10);
   } else {
      /* we assume that PAPI_hl_init() is called from a serial application
       * that was not linked against libpthread */
      _internal_hl_library_init();
   }
}

/* Probe whether a named counter can be added to an event set on this
 * machine; returns PAPI_OK if usable, a PAPI error code otherwise.
 * NOTE(review): on the early-return error paths the temporary EventSet is
 * neither cleaned up nor destroyed — possible resource leak; confirm. */
static int _internal_hl_checkCounter ( char* counter )
{
   int EventSet = PAPI_NULL;
   int eventcode;
   int retval;

   HLDBG("Counter: %s\n", counter);
   if ( ( retval = PAPI_create_eventset( &EventSet ) ) != PAPI_OK )
      return ( retval );
   if ( ( retval = PAPI_event_name_to_code( counter, &eventcode ) ) != PAPI_OK ) {
      HLDBG("Counter %s does not exist\n", counter);
      return ( retval );
   }
   if ( ( retval = PAPI_add_event (EventSet, eventcode) ) != PAPI_OK ) {
      HLDBG("Cannot add counter %s\n", counter);
      return ( retval );
   }
   if ( ( retval = PAPI_cleanup_eventset (EventSet) ) != PAPI_OK )
      return ( retval );
   if ( ( retval = PAPI_destroy_eventset (&EventSet) ) != PAPI_OK )
      return ( retval );
   return ( PAPI_OK );
}

/* Determine the MPI-style rank of this process from common launcher
 * environment variables; returns -1 when no rank can be determined. */
static int _internal_hl_determine_rank()
{
   int rank = -1;
   /* check environment variables for rank identification */
   if ( getenv("OMPI_COMM_WORLD_RANK") != NULL )
      rank = atoi(getenv("OMPI_COMM_WORLD_RANK"));
   else if ( getenv("ALPS_APP_PE") != NULL )
      rank = atoi(getenv("ALPS_APP_PE"));
   else if ( getenv("PMI_RANK") != NULL )
      rank = atoi(getenv("PMI_RANK"));
   else if ( getenv("SLURM_PROCID") != NULL )
      rank = atoi(getenv("SLURM_PROCID"));
   return rank;
}

/* Remove all space characters from str in place; returns str. */
static char *_internal_hl_remove_spaces( char *str )
{
   char *out = str, *put = str;
   for(; *str != '\0'; ++str) {
      if(*str != ' ')
         *put++ = *str;
   }
   *put = '\0';
   return out;
}

/* Populate requested_event_names with the subset of the built-in default
 * events that is actually available on this machine. */
static int _internal_hl_determine_default_events()
{
   int i;
   HLDBG("Default events\n");
   char *default_events[] = {
      "perf::TASK-CLOCK",
      "PAPI_TOT_INS",
      "PAPI_TOT_CYC",
      "PAPI_FP_INS",
      "PAPI_FP_OPS"
   };
   int num_of_defaults = sizeof(default_events) / sizeof(char*);

   /* allocate memory for requested events */
   requested_event_names = (char**)malloc(num_of_defaults * sizeof(char*));
   if ( requested_event_names == NULL )
      return ( PAPI_ENOMEM );

   /* check if default events are available on the current machine */
   for ( i = 0; i < num_of_defaults; i++ ) {
      if ( _internal_hl_checkCounter( default_events[i] ) == PAPI_OK ) {
         requested_event_names[num_of_requested_events++] = strdup(default_events[i]);
         if ( requested_event_names[num_of_requested_events -1] == NULL )
            return ( PAPI_ENOMEM );
      }
   }
   return ( PAPI_OK );
}

/* Parse a comma-separated event list from the user into
 * requested_event_names (spaces stripped from each name).
 * NOTE(review): user_events_copy is leaked on the PAPI_ENOMEM and
 * PAPI_EINVAL early-return paths — confirm against upstream. */
static int _internal_hl_read_user_events(const char *user_events)
{
   char* user_events_copy;
   const char *separator;        //separator for events
   int num_of_req_events = 1;    //number of events in string
   int req_event_index = 0;      //index of event
   const char *position = NULL;  //current position in processed string
   char *token;

   HLDBG("User events: %s\n", user_events);
   user_events_copy = strdup(user_events);
   if ( user_events_copy == NULL )
      return ( PAPI_ENOMEM );

   /* check if string is not empty */
   if ( strlen( user_events_copy ) > 0 ) {
      /* count number of separator characters */
      position = user_events_copy;
      separator=",";
      while ( *position ) {
         if ( strchr( separator, *position ) ) {
            num_of_req_events++;
         }
         position++;
      }

      /* allocate memory for requested events */
      requested_event_names = (char**)malloc(num_of_req_events * sizeof(char*));
      if ( requested_event_names == NULL )
         return ( PAPI_ENOMEM );

      /* parse list of event names */
      token = strtok( user_events_copy, separator );
      while ( token ) {
         if ( req_event_index >= num_of_req_events ){
            /* more entries as in the first run */
            return PAPI_EINVAL;
         }
         requested_event_names[req_event_index] = strdup(_internal_hl_remove_spaces(token));
         if ( requested_event_names[req_event_index] == NULL )
            return ( PAPI_ENOMEM );
         token = strtok( NULL, separator );
         req_event_index++;
      }
   }

   num_of_requested_events = num_of_req_events;
   free(user_events_copy);

   if ( num_of_requested_events == 0 )
      return PAPI_EINVAL;
   HLDBG("Number of requested events: %d\n", num_of_requested_events);
   return ( PAPI_OK );
}

/* Initialize one components_t slot: create its (temporary) EventSet,
 * optionally enable multiplexing, and allocate its parallel event arrays. */
static int _internal_hl_new_component(int component_id, components_t *component)
{
   int retval;

   /* create new EventSet
*/ component->EventSet = PAPI_NULL; if ( ( retval = PAPI_create_eventset( &component->EventSet ) ) != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: Cannot create EventSet for component %d.\n", component_id); return ( retval ); } /* Support multiplexing if user wants to */ if ( getenv("PAPI_MULTIPLEX") != NULL ) { /* multiplex only for cpu core events */ if ( component_id == 0 ) { retval = PAPI_assign_eventset_component(component->EventSet, component_id); if ( retval != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: PAPI_assign_eventset_component failed.\n"); } else { if ( PAPI_get_multiplex(component->EventSet) == false ) { retval = PAPI_set_multiplex(component->EventSet); if ( retval != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: PAPI_set_multiplex failed.\n"); } } } } } component->component_id = component_id; component->num_of_events = 0; component->max_num_of_events = PAPIHL_NUM_OF_EVENTS_PER_COMPONENT; component->event_names = NULL; component->event_names = (char**)malloc(component->max_num_of_events * sizeof(char*)); if ( component->event_names == NULL ) return ( PAPI_ENOMEM ); component->event_codes = NULL; component->event_codes = (int*)malloc(component->max_num_of_events * sizeof(int)); if ( component->event_codes == NULL ) return ( PAPI_ENOMEM ); component->event_types = NULL; component->event_types = (short*)malloc(component->max_num_of_events * sizeof(short)); if ( component->event_types == NULL ) return ( PAPI_ENOMEM ); num_of_components += 1; return ( PAPI_OK ); } static int _internal_hl_add_event_to_component(char *event_name, int event, short event_type, components_t *component) { int i, retval; /* check if we need to reallocate memory for event_names, event_codes and event_types */ if ( component->num_of_events == component->max_num_of_events ) { component->max_num_of_events *= 2; component->event_names = (char**)realloc(component->event_names, component->max_num_of_events * sizeof(char*)); if ( component->event_names == NULL ) 
return ( PAPI_ENOMEM ); component->event_codes = (int*)realloc(component->event_codes, component->max_num_of_events * sizeof(int)); if ( component->event_codes == NULL ) return ( PAPI_ENOMEM ); component->event_types = (short*)realloc(component->event_types, component->max_num_of_events * sizeof(short)); if ( component->event_types == NULL ) return ( PAPI_ENOMEM ); } retval = PAPI_add_event( component->EventSet, event ); if ( retval != PAPI_OK ) { const PAPI_component_info_t* cmpinfo; cmpinfo = PAPI_get_component_info( component->component_id ); verbose_fprintf(stdout, "PAPI-HL Warning: Cannot add %s to component %s.\n", event_name, cmpinfo->name); verbose_fprintf(stdout, "The following event combination is not supported:\n"); for ( i = 0; i < component->num_of_events; i++ ) verbose_fprintf(stdout, " %s\n", component->event_names[i]); verbose_fprintf(stdout, " %s\n", event_name); verbose_fprintf(stdout, "Advice: Use papi_event_chooser to obtain an appropriate event set for this component or set PAPI_MULTIPLEX=1.\n"); return PAPI_EINVAL; } component->event_names[component->num_of_events] = event_name; component->event_codes[component->num_of_events] = event; component->event_types[component->num_of_events] = event_type; component->num_of_events += 1; total_num_events += 1; return PAPI_OK; } static int _internal_hl_create_components() { int i, j, retval, event; int component_id = -1; int comp_index = 0; bool component_exists = false; short event_type = 0; HLDBG("Create components\n"); components = (components_t*)malloc(max_num_of_components * sizeof(components_t)); if ( components == NULL ) return ( PAPI_ENOMEM ); for ( i = 0; i < num_of_requested_events; i++ ) { /* check if requested event contains event type (instant or delta) */ const char sep = '='; char *ret; int index; /* search for '=' in event name */ ret = strchr(requested_event_names[i], sep); if (ret) { if ( strcmp(ret, "=instant") == 0 ) event_type = 1; else event_type = 0; /* get index of '=' in event 
name */ index = (int)(ret - requested_event_names[i]); /* remove event type from string if '=instant' or '=delta' */ if ( (strcmp(ret, "=instant") == 0) || (strcmp(ret, "=delta") == 0) ) requested_event_names[i][index] = '\0'; } /* check if event is supported on current machine */ retval = _internal_hl_checkCounter(requested_event_names[i]); if ( retval != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Warning: \"%s\" does not exist or is not supported on this machine.\n", requested_event_names[i]); } else { /* determine event code and corresponding component id */ retval = PAPI_event_name_to_code( requested_event_names[i], &event ); if ( retval != PAPI_OK ) return ( retval ); component_id = PAPI_COMPONENT_INDEX( event ); /* check if component_id already exists in global components structure */ for ( j = 0; j < num_of_components; j++ ) { if ( components[j].component_id == component_id ) { component_exists = true; comp_index = j; break; } else { component_exists = false; } } /* create new component */ if ( false == component_exists ) { /* check if we need to reallocate memory for components */ if ( num_of_components == max_num_of_components ) { max_num_of_components *= 2; components = (components_t*)realloc(components, max_num_of_components * sizeof(components_t)); if ( components == NULL ) return ( PAPI_ENOMEM ); } comp_index = num_of_components; retval = _internal_hl_new_component(component_id, &components[comp_index]); if ( retval != PAPI_OK ) return ( retval ); } /* add event to current component */ retval = _internal_hl_add_event_to_component(requested_event_names[i], event, event_type, &components[comp_index]); if ( retval == PAPI_ENOMEM ) return ( retval ); } } HLDBG("Number of components %d\n", num_of_components); if ( num_of_components > 0 ) verbose_fprintf(stdout, "PAPI-HL Info: Using the following events:\n"); /* destroy all EventSets from global data */ for ( i = 0; i < num_of_components; i++ ) { if ( ( retval = PAPI_cleanup_eventset 
(components[i].EventSet) ) != PAPI_OK ) return ( retval ); if ( ( retval = PAPI_destroy_eventset (&components[i].EventSet) ) != PAPI_OK ) return ( retval ); components[i].EventSet = PAPI_NULL; HLDBG("component_id = %d\n", components[i].component_id); HLDBG("num_of_events = %d\n", components[i].num_of_events); for ( j = 0; j < components[i].num_of_events; j++ ) { HLDBG(" %s type=%d\n", components[i].event_names[j], components[i].event_types[j]); verbose_fprintf(stdout, " %s\n", components[i].event_names[j]); } } if ( num_of_components == 0 ) return PAPI_EINVAL; return PAPI_OK; } static int _internal_hl_read_events(const char* events) { int i, retval; HLDBG("Read events: %s\n", events); if ( events != NULL ) { if ( _internal_hl_read_user_events(events) != PAPI_OK ) if ( ( retval = _internal_hl_determine_default_events() ) != PAPI_OK ) return ( retval ); /* check if user specified events via environment variable */ } else if ( getenv("PAPI_EVENTS") != NULL ) { char *user_events_from_env = strdup( getenv("PAPI_EVENTS") ); if ( user_events_from_env == NULL ) return ( PAPI_ENOMEM ); if ( _internal_hl_read_user_events(user_events_from_env) != PAPI_OK ) if ( ( retval = _internal_hl_determine_default_events() ) != PAPI_OK ) { free(user_events_from_env); return ( retval ); } free(user_events_from_env); } else { if ( ( retval = _internal_hl_determine_default_events() ) != PAPI_OK ) return ( retval ); } /* create components based on requested events */ if ( _internal_hl_create_components() != PAPI_OK ) { /* requested events do not work at all, use default events */ verbose_fprintf(stdout, "PAPI-HL Warning: All requested events do not work, using default.\n"); for ( i = 0; i < num_of_requested_events; i++ ) free(requested_event_names[i]); free(requested_event_names); num_of_requested_events = 0; if ( ( retval = _internal_hl_determine_default_events() ) != PAPI_OK ) return ( retval ); if ( ( retval = _internal_hl_create_components() ) != PAPI_OK ) return ( retval ); } 
events_determined = true; return ( PAPI_OK ); } static int _internal_hl_create_event_sets() { int i, j, retval; long_long cycles; if ( state == PAPIHL_ACTIVE ) { /* allocate memory for local components */ _local_components = (local_components_t*)malloc(num_of_components * sizeof(local_components_t)); if ( _local_components == NULL ) return ( PAPI_ENOMEM ); for ( i = 0; i < num_of_components; i++ ) { /* create EventSet */ _local_components[i].EventSet = PAPI_NULL; if ( ( retval = PAPI_create_eventset( &_local_components[i].EventSet ) ) != PAPI_OK ) { return (retval ); } /* Support multiplexing if user wants to */ if ( getenv("PAPI_MULTIPLEX") != NULL ) { /* multiplex only for cpu core events */ if ( components[i].component_id == 0 ) { retval = PAPI_assign_eventset_component(_local_components[i].EventSet, components[i].component_id ); if ( retval != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: PAPI_assign_eventset_component failed.\n"); } else { if ( PAPI_get_multiplex(_local_components[i].EventSet) == false ) { retval = PAPI_set_multiplex(_local_components[i].EventSet); if ( retval != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: PAPI_set_multiplex failed.\n"); } } } } } /* add event to current EventSet */ for ( j = 0; j < components[i].num_of_events; j++ ) { retval = PAPI_add_event( _local_components[i].EventSet, components[i].event_codes[j] ); if ( retval != PAPI_OK ) { return (retval ); } } /* allocate memory for return values */ _local_components[i].values = (long_long*)malloc(components[i].num_of_events * sizeof(long_long)); if ( _local_components[i].values == NULL ) return ( PAPI_ENOMEM ); } for ( i = 0; i < num_of_components; i++ ) { if ( ( retval = PAPI_start( _local_components[i].EventSet ) ) != PAPI_OK ) return (retval ); /* warm up PAPI code paths and data structures */ if ( ( retval = PAPI_read_ts( _local_components[i].EventSet, _local_components[i].values, &cycles ) != PAPI_OK ) ) { return (retval ); } } return PAPI_OK; } return ( 
PAPI_EMISC );
}

/* Allocate a read-value node and push it at the head of the given
 * doubly-linked list; returns the new node or NULL on allocation failure. */
static inline reads_t* _internal_hl_insert_read_node(reads_t** head_node)
{
   reads_t *new_node;

   /* create new region node */
   if ( ( new_node = malloc(sizeof(reads_t)) ) == NULL )
      return ( NULL );
   new_node->next = NULL;
   new_node->prev = NULL;

   /* insert node in list */
   if ( *head_node == NULL ) {
      *head_node = new_node;
      return new_node;
   }
   (*head_node)->prev = new_node;
   new_node->next = *head_node;
   *head_node = new_node;

   return new_node;
}

/* Fold the current thread-local counter values into a region node.
 * Layout of node->values: [0] = region count, [1] = CPU cycles,
 * [2..] = component events in component order.
 * REGION_BEGIN records offsets, REGION_READ appends a read node per event,
 * REGION_END accumulates totals (instant events: last value only). */
static inline int _internal_hl_add_values_to_region( regions_t *node, enum region_type reg_typ )
{
   int i, j;
   int region_count = 1;
   int cmp_iter = 2;

   if ( reg_typ == REGION_BEGIN ) {
      /* set first fixed counters */
      node->values[0].offset = region_count;
      node->values[1].offset = _local_cycles;
      /* events from components */
      for ( i = 0; i < num_of_components; i++ )
         for ( j = 0; j < components[i].num_of_events; j++ )
            node->values[cmp_iter++].offset = _local_components[i].values[j];
   } else if ( reg_typ == REGION_READ ) {
      /* create a new read node and add values */
      reads_t* read_node;
      if ( ( read_node = _internal_hl_insert_read_node(&node->values[1].read_values) ) == NULL )
         return ( PAPI_ENOMEM );
      read_node->value = _local_cycles - node->values[1].offset;
      for ( i = 0; i < num_of_components; i++ ) {
         for ( j = 0; j < components[i].num_of_events; j++ ) {
            reads_t* read_node;
            if ( ( read_node = _internal_hl_insert_read_node(&node->values[cmp_iter].read_values) ) == NULL )
               return ( PAPI_ENOMEM );
            if ( components[i].event_types[j] == 1 )
               read_node->value = _local_components[i].values[j];
            else
               read_node->value = _local_components[i].values[j] - node->values[cmp_iter].offset;
            cmp_iter++;
         }
      }
   } else if ( reg_typ == REGION_END ) {
      /* determine difference of current value and offset and add previous total value */
      node->values[0].total += node->values[0].offset;
      node->values[1].total += _local_cycles - node->values[1].offset;
      /* events from components */
      for ( i = 0; i < num_of_components; i++ )
         for ( j = 0; j < components[i].num_of_events; j++ ) {
            /* if event type is instant only save last value */
            if ( components[i].event_types[j] == 1 )
               node->values[cmp_iter].total += _local_components[i].values[j];
            else
               node->values[cmp_iter].total += _local_components[i].values[j] - node->values[cmp_iter].offset;
            cmp_iter++;
         }
   }
   return ( PAPI_OK );
}

/* Allocate a region node (flexible value array sized for all events plus
 * region count and cycles), zero its totals, and push it at the list head. */
static inline regions_t* _internal_hl_insert_region_node(regions_t** head_node, const char *region )
{
   regions_t *new_node;
   int i;
   int extended_total_num_events;

   /* number of all events including region count and CPU cycles */
   extended_total_num_events = total_num_events + 2;

   /* create new region node */
   new_node = malloc(sizeof(regions_t) + extended_total_num_events * sizeof(value_t));
   if ( new_node == NULL )
      return ( NULL );
   new_node->region = (char *)malloc((strlen(region) + 1) * sizeof(char));
   if ( new_node->region == NULL )
      return ( NULL );

   new_node->next = NULL;
   new_node->prev = NULL;
   strcpy(new_node->region, region);
   for ( i = 0; i < extended_total_num_events; i++ ) {
      new_node->values[i].total = 0;
      new_node->values[i].read_values = NULL;
   }

   /* insert node in list */
   if ( *head_node == NULL ) {
      *head_node = new_node;
      return new_node;
   }
   (*head_node)->prev = new_node;
   new_node->next = *head_node;
   *head_node = new_node;

   return new_node;
}

/* Linear search for a region node by name; returns NULL if not found. */
static inline regions_t* _internal_hl_find_region_node(regions_t* head_node, const char *region )
{
   regions_t* find_node = head_node;
   while ( find_node != NULL ) {
      if ( strcmp(find_node->region, region) == 0 ) {
         return find_node;
      }
      find_node = find_node->next;
   }
   find_node = NULL;
   return find_node;
}

/* Insert a node for the given thread id into the global binary tree;
 * returns the new node or NULL on allocation failure. */
static inline threads_t* _internal_hl_insert_thread_node(unsigned long tid)
{
   threads_t *new_node = (threads_t*)malloc(sizeof(threads_t));
   if ( new_node == NULL )
      return ( NULL );
   new_node->key = tid;
   new_node->value = NULL; /* head node of region list */
   tsearch(new_node, &binary_tree->root, compar);
   return new_node;
}

/* Look up the node for the given thread id in the global binary tree using
 * the shared scratch node binary_tree->find_p; returns NULL if not found.
 * NOTE(review): callers appear to serialize access via HIGHLEVEL_LOCK,
 * since find_p is shared mutable state — confirm. */
static inline threads_t* _internal_hl_find_thread_node(unsigned long tid)
{
   threads_t *find_node = binary_tree->find_p;
   find_node->key = tid;
   void *found = tfind(find_node, &binary_tree->root, compar);
   if ( found != NULL ) {
      find_node = (*(threads_t**)found);
      return find_node;
   }
   return NULL;
}

/* Store the just-read counter values for (tid, region) under the global
 * lock; creates thread/region nodes on REGION_BEGIN, warns on unmatched
 * REGION_READ, and returns PAPI_EINVAL on unmatched REGION_END. */
static int _internal_hl_store_counters( unsigned long tid, const char *region,
                                        enum region_type reg_typ )
{
   int retval;

   _papi_hwi_lock( HIGHLEVEL_LOCK );
   threads_t* current_thread_node;

   /* check if current thread is already stored in tree */
   current_thread_node = _internal_hl_find_thread_node(tid);
   if ( current_thread_node == NULL ) {
      /* insert new node for current thread in tree if type is REGION_BEGIN */
      if ( reg_typ == REGION_BEGIN ) {
         if ( ( current_thread_node = _internal_hl_insert_thread_node(tid) ) == NULL ) {
            _papi_hwi_unlock( HIGHLEVEL_LOCK );
            return ( PAPI_ENOMEM );
         }
      } else {
         _papi_hwi_unlock( HIGHLEVEL_LOCK );
         return ( PAPI_EINVAL );
      }
   }

   regions_t* current_region_node;
   /* check if node for current region already exists */
   current_region_node = _internal_hl_find_region_node(current_thread_node->value, region);
   if ( current_region_node == NULL ) {
      /* create new node for current region in list if type is REGION_BEGIN */
      if ( reg_typ == REGION_BEGIN ) {
         if ( ( current_region_node = _internal_hl_insert_region_node(&current_thread_node->value,region) ) == NULL ) {
            _papi_hwi_unlock( HIGHLEVEL_LOCK );
            return ( PAPI_ENOMEM );
         }
      } else {
         /* ignore no matching REGION_READ */
         if ( reg_typ == REGION_READ ) {
            verbose_fprintf(stdout, "PAPI-HL Warning: Cannot find matching region for PAPI_hl_read(\"%s\") for thread id=%lu.\n", region, PAPI_thread_id());
            retval = PAPI_OK;
         } else {
            verbose_fprintf(stdout, "PAPI-HL Warning: Cannot find matching region for PAPI_hl_region_end(\"%s\") for thread id=%lu.\n", region, PAPI_thread_id());
            retval = PAPI_EINVAL;
         }
         _papi_hwi_unlock( HIGHLEVEL_LOCK );
         return ( retval );
      }
   }

   /* add recorded values to current region */
   if ( ( retval = _internal_hl_add_values_to_region( current_region_node, reg_typ ) ) != PAPI_OK ) {
      _papi_hwi_unlock( HIGHLEVEL_LOCK );
      return ( retval );
   }

   /* count all REGION_BEGIN and REGION_END calls */
   if ( reg_typ == REGION_BEGIN ) region_begin_cnt++;
   if ( reg_typ == REGION_END ) region_end_cnt++;
   _papi_hwi_unlock( HIGHLEVEL_LOCK );
   return ( PAPI_OK );
}

/* Read all per-thread EventSets into _local_components[i].values; the last
 * component's read also captures a cycle timestamp into _local_cycles. */
static int _internal_hl_read_counters()
{
   int i, j, retval;
   for ( i = 0; i < num_of_components; i++ ) {
      if ( i < ( num_of_components - 1 ) ) {
         retval = PAPI_read( _local_components[i].EventSet, _local_components[i].values);
      } else {
         /* get cycles for last component */
         retval = PAPI_read_ts( _local_components[i].EventSet, _local_components[i].values, &_local_cycles );
      }
      HLDBG("Thread-ID:%lu, Component-ID:%d\n", PAPI_thread_id(), components[i].component_id);
      for ( j = 0; j < components[i].num_of_events; j++ ) {
         HLDBG("Thread-ID:%lu, %s:%lld\n", PAPI_thread_id(), components[i].event_names[j], _local_components[i].values[j]);
      }
      if ( retval != PAPI_OK )
         return ( retval );
   }
   return ( PAPI_OK );
}

/* Convenience wrapper: read all counters, then store them for the current
 * thread/region; deactivates PAPI-HL entirely on any failure. */
static int _internal_hl_read_and_store_counters( const char *region, enum region_type reg_typ )
{
   int retval;
   /* read all events */
   if ( ( retval = _internal_hl_read_counters() ) != PAPI_OK ) {
      verbose_fprintf(stdout, "PAPI-HL Error: Could not read counters for thread %lu.\n", PAPI_thread_id());
      _internal_hl_clean_up_all(true);
      return ( retval );
   }

   /* store all events */
   if ( ( retval = _internal_hl_store_counters( PAPI_thread_id(), region, reg_typ) ) != PAPI_OK ) {
      verbose_fprintf(stdout, "PAPI-HL Error: Could not store counters for thread %lu.\n", PAPI_thread_id());
      verbose_fprintf(stdout, "PAPI-HL Advice: Check if your regions are matching.\n");
      _internal_hl_clean_up_all(true);
      return ( retval );
   }
   return ( PAPI_OK );
}

/* Allocate the global binary tree container and its scratch find node. */
static int _internal_hl_create_global_binary_tree()
{
   if ( ( binary_tree = (binary_tree_t*)malloc(sizeof(binary_tree_t)) ) == NULL )
      return ( PAPI_ENOMEM );
   binary_tree->root = NULL;
   if ( ( binary_tree->find_p = (threads_t*)malloc(sizeof(threads_t)) ) == NULL )
      return ( PAPI_ENOMEM );
   return ( PAPI_OK
); } static int _internal_hl_mkdir(const char *dir) { int retval; int errno; char *tmp = NULL; char *p = NULL; size_t len; if ( ( tmp = strdup(dir) ) == NULL ) return ( PAPI_ENOMEM ); len = strlen(tmp); if(tmp[len - 1] == '/') tmp[len - 1] = 0; for(p = tmp + 1; *p; p++) { if(*p == '/') { *p = 0; errno = 0; retval = mkdir(tmp, S_IRWXU); if ( retval != 0 && errno != EEXIST ) return ( PAPI_ESYS ); *p = '/'; } } retval = mkdir(tmp, S_IRWXU); if ( retval != 0 && errno != EEXIST ) return ( PAPI_ESYS ); free(tmp); return ( PAPI_OK ); } static int _internal_hl_determine_output_path() { /* check if PAPI_OUTPUT_DIRECTORY is set */ char *output_prefix = NULL; if ( getenv("PAPI_OUTPUT_DIRECTORY") != NULL ) { if ( ( output_prefix = strdup( getenv("PAPI_OUTPUT_DIRECTORY") ) ) == NULL ) return ( PAPI_ENOMEM ); } else { if ( ( output_prefix = strdup( getcwd(NULL,0) ) ) == NULL ) return ( PAPI_ENOMEM ); } /* generate absolute path for measurement directory */ if ( ( absolute_output_file_path = (char *)malloc((strlen(output_prefix) + 64) * sizeof(char)) ) == NULL ) return ( PAPI_ENOMEM ); if ( output_counter > 0 ) sprintf(absolute_output_file_path, "%s/papi_%d", output_prefix, output_counter); else sprintf(absolute_output_file_path, "%s/papi", output_prefix); /* check if directory already exists */ struct stat buf; if ( stat(absolute_output_file_path, &buf) == 0 && S_ISDIR(buf.st_mode) ) { /* rename old directory by adding a timestamp */ char *new_absolute_output_file_path = NULL; if ( ( new_absolute_output_file_path = (char *)malloc((strlen(absolute_output_file_path) + 64) * sizeof(char)) ) == NULL ) return ( PAPI_ENOMEM ); /* create timestamp */ time_t t = time(NULL); struct tm tm = *localtime(&t); char m_time[32]; sprintf(m_time, "%d%02d%02d-%02d%02d%02d", tm.tm_year+1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); /* add timestamp to existing folder string */ sprintf(new_absolute_output_file_path, "%s-%s", absolute_output_file_path, m_time); uintmax_t 
current_unix_time = (uintmax_t)t; uintmax_t unix_time_from_old_directory = buf.st_mtime; /* This is a workaround for MPI applications!!! * Only rename existing measurement directory when it is older than * current timestamp. If it's not, we assume that another MPI process already created a new measurement directory. */ if ( unix_time_from_old_directory < current_unix_time ) { if ( rename(absolute_output_file_path, new_absolute_output_file_path) != 0 ) { verbose_fprintf(stdout, "PAPI-HL Warning: Cannot rename old measurement directory.\n"); verbose_fprintf(stdout, "If you use MPI, another process may have already renamed the directory.\n"); } } free(new_absolute_output_file_path); } free(output_prefix); output_counter++; return ( PAPI_OK ); } static void _internal_hl_json_line_break_and_indent( FILE* f, bool b, int width ) { int i; if ( b ) { fprintf(f, "\n"); for ( i = 0; i < width; ++i ) fprintf(f, " "); } } static void _internal_hl_json_region_events(FILE* f, bool beautifier, regions_t *regions) { char **all_event_names = NULL; int extended_total_num_events; int i, j, cmp_iter; /* generate array of all events including region count and CPU cycles for output */ extended_total_num_events = total_num_events + 2; all_event_names = (char**)malloc(extended_total_num_events * sizeof(char*)); all_event_names[0] = "region_count"; all_event_names[1] = "cycles"; cmp_iter = 2; for ( i = 0; i < num_of_components; i++ ) { for ( j = 0; j < components[i].num_of_events; j++ ) { all_event_names[cmp_iter++] = components[i].event_names[j]; } } for ( j = 0; j < extended_total_num_events; j++ ) { _internal_hl_json_line_break_and_indent(f, beautifier, 6); /* print read values if available */ if ( regions->values[j].read_values != NULL) { reads_t* read_node = regions->values[j].read_values; /* going to last node */ while ( read_node->next != NULL ) { read_node = read_node->next; } /* read values in reverse order */ int read_cnt = 1; fprintf(f, "\"%s\":{", all_event_names[j]); 
_internal_hl_json_line_break_and_indent(f, beautifier, 7); fprintf(f, "\"total\":\"%lld\",", regions->values[j].total); while ( read_node != NULL ) { _internal_hl_json_line_break_and_indent(f, beautifier, 7); fprintf(f, "\"read_%d\":\"%lld\"", read_cnt,read_node->value); read_node = read_node->prev; if ( read_node == NULL ) { _internal_hl_json_line_break_and_indent(f, beautifier, 6); fprintf(f, "}"); if ( j < extended_total_num_events - 1 ) fprintf(f, ","); } else { fprintf(f, ","); } read_cnt++; } } else { HLDBG(" %s:%lld\n", all_event_names[j], regions->values[j].total); if ( j == ( extended_total_num_events - 1 ) ) { fprintf(f, "\"%s\":\"%lld\"", all_event_names[j], regions->values[j].total); } else { fprintf(f, "\"%s\":\"%lld\",", all_event_names[j], regions->values[j].total); } } } free(all_event_names); } static void _internal_hl_json_regions(FILE* f, bool beautifier, threads_t* thread_node) { /* iterate over regions list */ regions_t *regions = thread_node->value; /* going to last node */ while ( regions->next != NULL ) { regions = regions->next; } /* read regions in reverse order */ while (regions != NULL) { HLDBG(" Region:%s\n", regions->region); _internal_hl_json_line_break_and_indent(f, beautifier, 4); fprintf(f, "{"); _internal_hl_json_line_break_and_indent(f, beautifier, 5); fprintf(f, "\"%s\":{", regions->region); _internal_hl_json_region_events(f, beautifier, regions); _internal_hl_json_line_break_and_indent(f, beautifier, 5); fprintf(f, "}"); regions = regions->prev; _internal_hl_json_line_break_and_indent(f, beautifier, 4); if (regions == NULL ) { fprintf(f, "}"); } else { fprintf(f, "},"); } } } static void _internal_hl_json_threads(FILE* f, bool beautifier, unsigned long* tids, int threads_num) { int i; _internal_hl_json_line_break_and_indent(f, beautifier, 1); fprintf(f, "\"threads\":["); /* get regions of all threads */ for ( i = 0; i < threads_num; i++ ) { HLDBG("Thread ID:%lu\n", tids[i]); /* find values of current thread in global binary 
tree */ threads_t* thread_node = _internal_hl_find_thread_node(tids[i]); if ( thread_node != NULL ) { /* do we really need the exact thread id? */ _internal_hl_json_line_break_and_indent(f, beautifier, 2); fprintf(f, "{"); _internal_hl_json_line_break_and_indent(f, beautifier, 3); fprintf(f, "\"id\":\"%lu\",", thread_node->key); /* in case we only store iterator id as thread id */ //fprintf(f, "\"ID\":%d,", i); _internal_hl_json_line_break_and_indent(f, beautifier, 3); fprintf(f, "\"regions\":["); _internal_hl_json_regions(f, beautifier, thread_node); _internal_hl_json_line_break_and_indent(f, beautifier, 3); fprintf(f, "]"); _internal_hl_json_line_break_and_indent(f, beautifier, 2); if ( i < threads_num - 1 ) { fprintf(f, "},"); } else { fprintf(f, "}"); } } } _internal_hl_json_line_break_and_indent(f, beautifier, 1); fprintf(f, "]"); } static void _internal_hl_write_output() { if ( output_generated == false ) { _papi_hwi_lock( HIGHLEVEL_LOCK ); if ( output_generated == false ) { /* check if events were recorded */ if ( binary_tree == NULL ) { verbose_fprintf(stdout, "PAPI-HL Info: No events were recorded.\n"); return; } unsigned long *tids = NULL; int number_of_threads; FILE *output_file; /* current CPU frequency in MHz */ int cpu_freq; if ( region_begin_cnt == region_end_cnt ) { verbose_fprintf(stdout, "PAPI-HL Info: Print results...\n"); } else { verbose_fprintf(stdout, "PAPI-HL Warning: Cannot generate output due to not matching regions.\n"); output_generated = true; HLDBG("region_begin_cnt=%d, region_end_cnt=%d\n", region_begin_cnt, region_end_cnt); _papi_hwi_unlock( HIGHLEVEL_LOCK ); return; } /* create new measurement directory */ if ( ( _internal_hl_mkdir(absolute_output_file_path) ) != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: Cannot create measurement directory %s.\n", absolute_output_file_path); return; } /* determine rank for output file */ int rank = _internal_hl_determine_rank(); if ( rank < 0 ) { /* generate unique rank number */ 
sprintf(absolute_output_file_path + strlen(absolute_output_file_path), "/rank_XXXXXX"); int fd; fd = mkstemp(absolute_output_file_path); close(fd); } else { sprintf(absolute_output_file_path + strlen(absolute_output_file_path), "/rank_%04d", rank); } /* determine current cpu frequency */ cpu_freq = PAPI_get_opt( PAPI_CLOCKRATE, NULL ); output_file = fopen(absolute_output_file_path, "w"); if ( output_file == NULL ) { verbose_fprintf(stdout, "PAPI-HL Error: Cannot create output file %s!\n", absolute_output_file_path); return; } else { /* list all threads */ if ( PAPI_list_threads( tids, &number_of_threads ) != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: PAPI_list_threads call failed!\n"); return; } if ( ( tids = malloc( number_of_threads * sizeof(unsigned long) ) ) == NULL ) { verbose_fprintf(stdout, "PAPI-HL Error: OOM!\n"); return; } if ( PAPI_list_threads( tids, &number_of_threads ) != PAPI_OK ) { verbose_fprintf(stdout, "PAPI-HL Error: PAPI_list_threads call failed!\n"); return; } /* start writing json file */ /* JSON beautifier (line break and indent) */ bool beautifier = true; /* start of JSON file */ fprintf(output_file, "{"); _internal_hl_json_line_break_and_indent(output_file, beautifier, 1); fprintf(output_file, "\"cpu in mhz\":\"%d\",", cpu_freq); /* write all regions with events per thread */ _internal_hl_json_threads(output_file, beautifier, tids, number_of_threads); /* end of JSON file */ _internal_hl_json_line_break_and_indent(output_file, beautifier, 0); fprintf(output_file, "}"); fprintf(output_file, "\n"); fclose(output_file); free(tids); if ( getenv("PAPI_REPORT") != NULL ) { /* print output to stdout */ printf("\n\nPAPI-HL Output:\n"); output_file = fopen(absolute_output_file_path, "r"); int c = fgetc(output_file); while (c != EOF) { printf("%c", c); c = fgetc(output_file); } printf("\n"); fclose(output_file); } } output_generated = true; } _papi_hwi_unlock( HIGHLEVEL_LOCK ); } } static void _internal_hl_clean_up_local_data() { int i, 
retval; /* destroy all EventSets from local data */ if ( _local_components != NULL ) { HLDBG("Thread-ID:%lu\n", PAPI_thread_id()); for ( i = 0; i < num_of_components; i++ ) { if ( ( retval = PAPI_stop( _local_components[i].EventSet, _local_components[i].values ) ) != PAPI_OK ) /* only print error when event set is running */ if ( retval != -9 ) verbose_fprintf(stdout, "PAPI-HL Error: PAPI_stop failed: %d.\n", retval); if ( ( retval = PAPI_cleanup_eventset (_local_components[i].EventSet) ) != PAPI_OK ) verbose_fprintf(stdout, "PAPI-HL Error: PAPI_cleanup_eventset failed: %d.\n", retval); if ( ( retval = PAPI_destroy_eventset (&_local_components[i].EventSet) ) != PAPI_OK ) verbose_fprintf(stdout, "PAPI-HL Error: PAPI_destroy_eventset failed: %d.\n", retval); free(_local_components[i].values); } free(_local_components); _local_components = NULL; /* count global thread variable */ _papi_hwi_lock( HIGHLEVEL_LOCK ); num_of_cleaned_threads++; _papi_hwi_unlock( HIGHLEVEL_LOCK ); } _local_state = PAPIHL_DEACTIVATED; } static void _internal_hl_clean_up_global_data() { int i; int extended_total_num_events; /* clean up binary tree of recorded events */ threads_t *thread_node; if ( binary_tree != NULL ) { while ( binary_tree->root != NULL ) { thread_node = *(threads_t **)binary_tree->root; /* clean up double linked list of region data */ regions_t *region = thread_node->value; regions_t *tmp; while ( region != NULL ) { /* clean up read node list */ extended_total_num_events = total_num_events + 2; for ( i = 0; i < extended_total_num_events; i++ ) { reads_t *read_node = region->values[i].read_values; reads_t *read_node_tmp; while ( read_node != NULL ) { read_node_tmp = read_node; read_node = read_node->next; free(read_node_tmp); } } tmp = region; region = region->next; free(tmp->region); free(tmp); } free(region); tdelete(thread_node, &binary_tree->root, compar); free(thread_node); } } /* we cannot free components here since other threads could still use them */ /* clean up 
requested event names */ for ( i = 0; i < num_of_requested_events; i++ ) free(requested_event_names[i]); free(requested_event_names); free(absolute_output_file_path); } static void _internal_hl_clean_up_all(bool deactivate) { int i, num_of_threads; /* we assume that output has been already generated or * cannot be generated due to previous errors */ output_generated = true; /* clean up thread local data */ if ( _local_state == PAPIHL_ACTIVE ) { HLDBG("Clean up thread local data for thread %lu\n", PAPI_thread_id()); _internal_hl_clean_up_local_data(); } /* clean up global data */ if ( state == PAPIHL_ACTIVE ) { _papi_hwi_lock( HIGHLEVEL_LOCK ); if ( state == PAPIHL_ACTIVE ) { verbose_fprintf(stdout, "PAPI-HL Info: Output generation is deactivated!\n"); HLDBG("Clean up global data for thread %lu\n", PAPI_thread_id()); _internal_hl_clean_up_global_data(); /* check if all other registered threads have cleaned up */ PAPI_list_threads(NULL, &num_of_threads); HLDBG("Number of registered threads: %d.\n", num_of_threads); HLDBG("Number of cleaned threads: %d.\n", num_of_cleaned_threads); if ( _internal_hl_check_for_clean_thread_states() == PAPI_OK && num_of_threads == num_of_cleaned_threads ) { PAPI_shutdown(); /* clean up components */ for ( i = 0; i < num_of_components; i++ ) { free(components[i].event_names); free(components[i].event_codes); free(components[i].event_types); } free(components); HLDBG("PAPI-HL shutdown!\n"); } else { verbose_fprintf(stdout, "PAPI-HL Warning: Could not call PAPI_shutdown() since some threads still have running event sets. 
Make sure to call PAPI_hl_cleanup_thread() at the end of all parallel regions and PAPI_hl_finalize() in the master thread!\n"); } /* deactivate PAPI-HL */ if ( deactivate ) state = PAPIHL_DEACTIVATED; } _papi_hwi_unlock( HIGHLEVEL_LOCK ); } } static int _internal_hl_check_for_clean_thread_states() { EventSetInfo_t *ESI; DynamicArray_t *map = &_papi_hwi_system_info.global_eventset_map; int i; for( i = 0; i < map->totalSlots; i++ ) { ESI = map->dataSlotArray[i]; if ( ESI ) { if ( ESI->state & PAPI_RUNNING ) return ( PAPI_EISRUN ); } } return ( PAPI_OK ); } /** @class PAPI_hl_init * @brief Initializes the high-level PAPI library. * * @par C Interface: * \#include <papi.h> @n * int PAPI_hl_init(); * * @retval PAPI_OK * @retval PAPI_HIGH_LEVEL_INITED * -- Initialization was already called. * @retval PAPI_EMISC * -- Initialization failed. * @retval PAPI_ENOMEM * -- Insufficient memory. * * PAPI_hl_init initializes the PAPI library and some high-level specific features. * If your application is making use of threads you do not need to call any other low level * initialization functions as PAPI_hl_init includes thread support. * Note that the first call of PAPI_hl_region_begin will automatically call PAPI_hl_init * if not already called. * * @par Example: * * @code * int retval; * * retval = PAPI_hl_init(); * if ( retval != PAPI_OK ) * handle_error(1); * * @endcode * * @see PAPI_hl_cleanup_thread * @see PAPI_hl_finalize * @see PAPI_hl_set_events * @see PAPI_hl_region_begin * @see PAPI_hl_read * @see PAPI_hl_region_end * @see PAPI_hl_print_output */ int _internal_PAPI_hl_init() { if ( state == PAPIHL_ACTIVE ) { if ( hl_initiated == false && hl_finalized == false ) { _internal_hl_onetime_library_init(); /* check if the library has been initialized successfully */ if ( state == PAPIHL_DEACTIVATED ) return ( PAPI_EMISC ); return ( PAPI_OK ); } return ( PAPI_ENOINIT ); } return ( PAPI_EMISC ); } /** @class PAPI_hl_cleanup_thread * @brief Cleans up all thread-local data. 
* * @par C Interface: * \#include <papi.h> @n * void PAPI_hl_cleanup_thread( ); * * @retval PAPI_OK * @retval PAPI_EMISC * -- Thread has been already cleaned up or PAPI is deactivated due to previous errors. * * PAPI_hl_cleanup_thread shuts down thread-local event sets and cleans local * data structures. It is recommended to use this function in combination with * PAPI_hl_finalize if your application is making use of threads. * * @par Example: * * @code * int retval; * * #pragma omp parallel * { * retval = PAPI_hl_region_begin("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * //Do some computation here * * retval = PAPI_hl_region_end("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * retval = PAPI_hl_cleanup_thread(); * if ( retval != PAPI_OK ) * handle_error(1); * } * * retval = PAPI_hl_finalize(); * if ( retval != PAPI_OK ) * handle_error(1); * * @endcode * * @see PAPI_hl_init * @see PAPI_hl_finalize * @see PAPI_hl_set_events * @see PAPI_hl_region_begin * @see PAPI_hl_read * @see PAPI_hl_region_end * @see PAPI_hl_print_output */ int _internal_PAPI_hl_cleanup_thread() { if ( state == PAPIHL_ACTIVE && hl_initiated == true && _local_state == PAPIHL_ACTIVE ) { /* do not clean local data from master thread */ if ( master_thread_id != PAPI_thread_id() ) _internal_hl_clean_up_local_data(); return ( PAPI_OK ); } return ( PAPI_EMISC ); } /** @class PAPI_hl_finalize * @brief Finalizes the high-level PAPI library. * * @par C Interface: * \#include <papi.h> @n * int PAPI_hl_finalize( ); * * @retval PAPI_OK * @retval PAPI_EMISC * -- PAPI has been already finalized or deactivated due to previous errors. * * PAPI_hl_finalize finalizes the high-level library by destroying all counting event sets * and internal data structures. 
* * @par Example: * * @code * int retval; * * retval = PAPI_hl_finalize(); * if ( retval != PAPI_OK ) * handle_error(1); * * @endcode * * @see PAPI_hl_init * @see PAPI_hl_cleanup_thread * @see PAPI_hl_set_events * @see PAPI_hl_region_begin * @see PAPI_hl_read * @see PAPI_hl_region_end * @see PAPI_hl_print_output */ int _internal_PAPI_hl_finalize() { if ( state == PAPIHL_ACTIVE && hl_initiated == true ) { _internal_hl_clean_up_all(true); return ( PAPI_OK ); } return ( PAPI_EMISC ); } /** @class PAPI_hl_set_events * @brief Generates event sets based on a list of hardware events. * * @par C Interface: * \#include <papi.h> @n * int PAPI_hl_set_events( const char* events ); * * @param events * -- list of hardware events separated by commas * * @retval PAPI_OK * @retval PAPI_EMISC * -- PAPI has been deactivated due to previous errors. * @retval PAPI_ENOMEM * -- Insufficient memory. * * PAPI_hl_set_events offers the user the possibility to determine hardware events in * the source code as an alternative to the environment variable PAPI_EVENTS. * Note that the content of PAPI_EVENTS is ignored if PAPI_hl_set_events was successfully executed. * If the events argument cannot be interpreted, default hardware events are * taken for the measurement. * * @par Example: * * @code * int retval; * * retval = PAPI_hl_set_events("PAPI_TOT_INS,PAPI_TOT_CYC"); * if ( retval != PAPI_OK ) * handle_error(1); * * @endcode * * @see PAPI_hl_init * @see PAPI_hl_cleanup_thread * @see PAPI_hl_finalize * @see PAPI_hl_region_begin * @see PAPI_hl_read * @see PAPI_hl_region_end * @see PAPI_hl_print_output */ int _internal_PAPI_hl_set_events(const char* events) { int retval; if ( state == PAPIHL_ACTIVE ) { /* This may only be called once after the high-level API was successfully * initiated. Any second call just returns PAPI_OK without doing an * expensive lock. 
*/ if ( hl_initiated == true ) { if ( events_determined == false ) { _papi_hwi_lock( HIGHLEVEL_LOCK ); if ( events_determined == false && state == PAPIHL_ACTIVE ) { HLDBG("Set events: %s\n", events); if ( ( retval = _internal_hl_read_events(events) ) != PAPI_OK ) { state = PAPIHL_DEACTIVATED; _internal_hl_clean_up_global_data(); _papi_hwi_unlock( HIGHLEVEL_LOCK ); return ( retval ); } if ( ( retval = _internal_hl_create_global_binary_tree() ) != PAPI_OK ) { state = PAPIHL_DEACTIVATED; _internal_hl_clean_up_global_data(); _papi_hwi_unlock( HIGHLEVEL_LOCK ); return ( retval ); } } _papi_hwi_unlock( HIGHLEVEL_LOCK ); } } /* in case the first locked thread ran into problems */ if ( state == PAPIHL_DEACTIVATED) return ( PAPI_EMISC ); return ( PAPI_OK ); } return ( PAPI_EMISC ); } /** @class PAPI_hl_print_output * @brief Prints values of hardware events. * * @par C Interface: * \#include <papi.h> @n * void PAPI_hl_print_output( ); * * PAPI_hl_print_output prints the measured values of hardware events in one file for serial * or thread parallel applications. * Multi-processing applications, such as MPI, will have one output file per process. * Each output file contains measured values of all threads. * The entire measurement can be converted in a better readable output via python. * For more information, see <a href="https://bitbucket.org/icl/papi/wiki/papi-hl.md">High Level API</a>. * Note that if PAPI_hl_print_output is not called explicitly PAPI will try to generate output * at the end of the application. However, for some reason, this feature sometimes does not work. * It is therefore recommended to call PAPI_hl_print_output for larger applications. 
* * @par Example: * * @code * * PAPI_hl_print_output(); * * @endcode * * @see PAPI_hl_init * @see PAPI_hl_cleanup_thread * @see PAPI_hl_finalize * @see PAPI_hl_set_events * @see PAPI_hl_region_begin * @see PAPI_hl_read * @see PAPI_hl_region_end */ void _internal_PAPI_hl_print_output() { if ( state == PAPIHL_ACTIVE && hl_initiated == true && output_generated == false ) { _internal_hl_write_output(); } } /** @class PAPI_hl_region_begin * @brief Reads and stores hardware events at the beginning of an instrumented code region. * * @par C Interface: * \#include <papi.h> @n * int PAPI_hl_region_begin( const char* region ); * * @param region * -- a unique region name * * @retval PAPI_OK * @retval PAPI_ENOTRUN * -- EventSet is currently not running or could not determined. * @retval PAPI_ESYS * -- A system or C library call failed inside PAPI, see the errno variable. * @retval PAPI_EMISC * -- PAPI has been deactivated due to previous errors. * @retval PAPI_ENOMEM * -- Insufficient memory. * * PAPI_hl_region_begin reads hardware events and stores them internally at the beginning * of an instrumented code region. * If not specified via environment variable PAPI_EVENTS, default events are used. * The first call sets all counters implicitly to zero and starts counting. * Note that if PAPI_EVENTS is not set or cannot be interpreted, default hardware events are * recorded. 
* * @par Example: * * @code * export PAPI_EVENTS="PAPI_TOT_INS,PAPI_TOT_CYC" * @endcode * * * @code * int retval; * * retval = PAPI_hl_region_begin("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * //Do some computation here * * retval = PAPI_hl_region_end("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * @endcode * * @see PAPI_hl_read * @see PAPI_hl_region_end */ int PAPI_hl_region_begin( const char* region ) { int retval; if ( state == PAPIHL_DEACTIVATED ) { /* check if we have to clean up local stuff */ if ( _local_state == PAPIHL_ACTIVE ) _internal_hl_clean_up_local_data(); return ( PAPI_EMISC ); } if ( hl_finalized == true ) return ( PAPI_ENOTRUN ); if ( hl_initiated == false ) { if ( ( retval = _internal_PAPI_hl_init() ) != PAPI_OK ) return ( retval ); } if ( events_determined == false ) { if ( ( retval = _internal_PAPI_hl_set_events(NULL) ) != PAPI_OK ) return ( retval ); } if ( _local_components == NULL ) { if ( ( retval = _internal_hl_create_event_sets() ) != PAPI_OK ) { HLDBG("Could not create local events sets for thread %lu.\n", PAPI_thread_id()); _internal_hl_clean_up_all(true); return ( retval ); } } /* read and store all events */ HLDBG("Thread ID:%lu, Region:%s\n", PAPI_thread_id(), region); if ( ( retval = _internal_hl_read_and_store_counters(region, REGION_BEGIN) ) != PAPI_OK ) return ( retval ); _local_region_begin_cnt++; return ( PAPI_OK ); } /** @class PAPI_hl_read * @brief Reads and stores hardware events inside of an instrumented code region. * * @par C Interface: * \#include <papi.h> @n * int PAPI_hl_read( const char* region ); * * @param region * -- a unique region name corresponding to PAPI_hl_region_begin * * @retval PAPI_OK * @retval PAPI_ENOTRUN * -- EventSet is currently not running or could not determined. * @retval PAPI_ESYS * -- A system or C library call failed inside PAPI, see the errno variable. * @retval PAPI_EMISC * -- PAPI has been deactivated due to previous errors. 
* @retval PAPI_ENOMEM * -- Insufficient memory. * * PAPI_hl_read reads hardware events and stores them internally inside * of an instrumented code region. * Assumes that PAPI_hl_region_begin was called before. * * @par Example: * * @code * int retval; * * retval = PAPI_hl_region_begin("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * //Do some computation here * * retval = PAPI_hl_read("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * //Do some computation here * * retval = PAPI_hl_region_end("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * @endcode * * @see PAPI_hl_region_begin * @see PAPI_hl_region_end */ int PAPI_hl_read(const char* region) { int retval; if ( state == PAPIHL_DEACTIVATED ) { /* check if we have to clean up local stuff */ if ( _local_state == PAPIHL_ACTIVE ) _internal_hl_clean_up_local_data(); return ( PAPI_EMISC ); } if ( _local_region_begin_cnt == 0 ) { verbose_fprintf(stdout, "PAPI-HL Warning: Cannot find matching region for PAPI_hl_read(\"%s\") for thread %lu.\n", region, PAPI_thread_id()); return ( PAPI_EMISC ); } if ( _local_components == NULL ) return ( PAPI_ENOTRUN ); /* read and store all events */ HLDBG("Thread ID:%lu, Region:%s\n", PAPI_thread_id(), region); if ( ( retval = _internal_hl_read_and_store_counters(region, REGION_READ) ) != PAPI_OK ) return ( retval ); return ( PAPI_OK ); } /** @class PAPI_hl_region_end * @brief Reads and stores hardware events at the end of an instrumented code region. * * @par C Interface: * \#include <papi.h> @n * int PAPI_hl_region_end( const char* region ); * * @param region * -- a unique region name corresponding to PAPI_hl_region_begin * * @retval PAPI_OK * @retval PAPI_ENOTRUN * -- EventSet is currently not running or could not determined. * @retval PAPI_ESYS * -- A system or C library call failed inside PAPI, see the errno variable. * @retval PAPI_EMISC * -- PAPI has been deactivated due to previous errors. 
* @retval PAPI_ENOMEM * -- Insufficient memory. * * PAPI_hl_region_end reads hardware events and stores the difference to the values from * PAPI_hl_region_begin at the end of an instrumented code region. * Assumes that PAPI_hl_region_begin was called before. * Note that an output is automatically generated when your application terminates. * * * @par Example: * * @code * int retval; * * retval = PAPI_hl_region_begin("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * //Do some computation here * * retval = PAPI_hl_region_end("computation"); * if ( retval != PAPI_OK ) * handle_error(1); * * @endcode * * @see PAPI_hl_region_begin * @see PAPI_hl_read */ int PAPI_hl_region_end( const char* region ) { int retval; if ( state == PAPIHL_DEACTIVATED ) { /* check if we have to clean up local stuff */ if ( _local_state == PAPIHL_ACTIVE ) _internal_hl_clean_up_local_data(); return ( PAPI_EMISC ); } if ( _local_region_begin_cnt == 0 ) { verbose_fprintf(stdout, "PAPI-HL Warning: Cannot find matching region for PAPI_hl_region_end(\"%s\") for thread %lu.\n", region, PAPI_thread_id()); return ( PAPI_EMISC ); } if ( _local_components == NULL ) return ( PAPI_ENOTRUN ); /* read and store all events */ HLDBG("Thread ID:%lu, Region:%s\n", PAPI_thread_id(), region); if ( ( retval = _internal_hl_read_and_store_counters(region, REGION_END) ) != PAPI_OK ) return ( retval ); _local_region_end_cnt++; return ( PAPI_OK ); }
trsm_x_coo_u_lo_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"

/*
 * Triangular solve with multiple right-hand sides (TRSM) for a COO matrix:
 * solves op(A) * y = alpha * x where A is UNIT lower-triangular and x, y
 * are dense row-major blocks of `columns` columns.
 *
 * Forward substitution per row r:
 *   y[r] = alpha * x[r] - sum_{col < r} A[r][col] * y[col]
 * (the unit diagonal contributes y[r] itself, strictly-upper entries and
 * the stored diagonal are ignored).
 *
 * alpha   scalar multiplier applied to x
 * A       sparse matrix in (possibly unsorted) COO layout
 * x       right-hand side block, leading dimension ldx
 * columns number of right-hand-side columns
 * y       solution block (written in place), leading dimension ldy
 *
 * Parallelized across RHS columns: each column's substitution is
 * independent, while rows within a column must be processed in order.
 * Note: the nnz scan per row keeps the kernel correct for unsorted COO
 * at O(rows * nnz) cost per column.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_COO *A,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    /* Use ALPHA_INT (not int) for all index arithmetic so 64-bit index
     * configurations do not silently truncate row/column numbers. */
    const ALPHA_INT m = A->rows;
    const ALPHA_INT nnz = A->nnz;
    int num_thread = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        for (ALPHA_INT r = 0; r < m; r++)
        {
            /* Accumulate the strictly-lower part of row r applied to the
             * already-computed leading entries of y. */
            ALPHA_Number acc;
            alpha_setzero(acc);
            for (ALPHA_INT cr = 0; cr < nnz; cr++)
            {
                ALPHA_INT row = A->row_indx[cr];
                ALPHA_INT col = A->col_indx[cr];
                if (row == r && col < r)
                    alpha_madde(acc, A->values[cr], y[col * ldy + out_y_col]);
            }
            /* y[r] = alpha * x[r] - acc (unit diagonal => no division) */
            ALPHA_Number t;
            alpha_mul(t, alpha, x[r * ldx + out_y_col]);
            alpha_sub(y[r * ldy + out_y_col], t, acc);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
original.c
#include <omp.h>

/* Shared dimension of all matrices below. */
#define MATDIM 2000

/* Kept for source compatibility with the original file; the loops below
 * use block-local counters instead, which removes the need for fragile
 * private(i, j, k) clauses on every OpenMP pragma. */
int i, j, k;

double a[MATDIM][MATDIM];
double b[MATDIM][MATDIM];
double c[MATDIM][MATDIM];

/* c += a * b, naive triple loop; outer rows are distributed across
 * OpenMP threads. Each thread writes disjoint rows of c, so no
 * reduction or synchronization is needed. */
void compute()
{
#pragma omp parallel for shared(a, b, c)
  for (int i = 0; i < MATDIM; i++) {
    for (int j = 0; j < MATDIM; j++) {
      for (int k = 0; k < MATDIM; k++) {
        c[i][j] += (a[i][k] * b[k][j]);
      }
    }
  }
}

int main(int argc, char *argv[])
{
  (void)argc; /* command-line arguments are unused */
  (void)argv;

  /* a[i][j] = i + j */
#pragma omp parallel for shared(a)
  for (int i = 0; i < MATDIM; i++) {
    for (int j = 0; j < MATDIM; j++) {
      a[i][j] = (i + j);
    }
  }

  /* b[i][j] = i * j */
#pragma omp parallel for shared(b)
  for (int i = 0; i < MATDIM; i++) {
    for (int j = 0; j < MATDIM; j++) {
      b[i][j] = (i * j);
    }
  }

  /* c starts from zero so compute() produces exactly a*b. */
#pragma omp parallel for shared(c)
  for (int i = 0; i < MATDIM; i++) {
    for (int j = 0; j < MATDIM; j++) {
      c[i][j] = 0;
    }
  }

  compute();

  return 0;
}
softmax_hcl_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haoluo@openailab.com
 */

#include "softmax_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>
#include <arm_neon.h>

/* Propagate the input tensor's shape to the output tensor when they differ.
 * Fix over the original: compare dim_num and only the valid dims[] entries
 * (the old code unconditionally read dims[0..3], touching stale values for
 * tensors with fewer than 4 dimensions and ignoring rank mismatches). */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    if (input_tensor->dim_num != output_tensor->dim_num
        || memcmp(input_tensor->dims, output_tensor->dims,
                  input_tensor->dim_num * sizeof(int)) != 0)
        return set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num);

    return 0;
}

/* Fast approximate exponential: exp(x) ~= (1 + x / 2^10)^(2^10).
 * The fused multiply-add seeds (1 + x/1024); ten successive squarings
 * then raise it to the 1024th power (hence "q10"). Accuracy is limited —
 * adequate here because softmax normalizes the results. */
static inline float32x4_t vexpq10_f32(float32x4_t x)
{
    x = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); // n = 10
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    x = vmulq_f32(x, x);
    return x;
}

/* For each inner position i (0 <= i < in_size), compute the maximum of
 * input[j * in_size + i] over all j in [0, on_size) and store it in
 * array[i]. Used for the numerically-stable softmax shift.
 *
 * BUGFIX: the original used vpmaxq_f32 (aarch64) / a vrev64q+vextq
 * combination (armv7), both of which mix values across lanes instead of
 * taking the lane-wise maximum, so array[i] did not hold the true
 * per-position maximum and the overflow guard was weakened. vmaxq_f32 is
 * the lane-wise maximum and exists on both architectures.
 *
 * NOTE(review): array is seeded with 0 rather than -FLT_MAX, so for
 * all-negative inputs the shift is 0; softmax stays mathematically
 * correct, only the overflow protection is looser — kept as-is. */
static void GetMaxArray(float* input, float* array, int in_size, int on_size, int num_thread)
{
    float* input_ptr = input;
    float* array_ptr = array;

    memset(array, 0, in_size * sizeof(float));

    for (int j = 0; j < on_size; j++)
    {
        /* vectorized body: 4 lanes at a time over the inner dimension */
        for (int i = 0; i < (in_size & -4); i += 4)
        {
            float32x4_t _p = vld1q_f32(array_ptr + i);
            float32x4_t _in = vld1q_f32(input_ptr + j * in_size + i);
            _p = vmaxq_f32(_p, _in);
            vst1q_f32(array_ptr + i, _p);
        }
        /* scalar tail for in_size not divisible by 4 */
        for (int i = in_size & ~3; i < in_size; i++)
        {
            if (array_ptr[i] < input_ptr[j * in_size + i])
                array_ptr[i] = input_ptr[j * in_size + i];
        }
    }
}

/* Softmax core: output = exp(input - max) normalized per inner position.
 * First pass writes the (approximate) exponentials and accumulates the
 * per-position sums; second pass divides every element by its sum. */
static void GetOutResult(float* input, float* output, float* maxarray, float* sum_array, int in_size,
                         int on_size, int num_thread)
{
    float* input_ptr = input;
    float* output_ptr = output;
    float* maxarray_ptr = maxarray;
    float* sum_array_ptr = sum_array;

    memset(sum_array, 0x0, in_size * sizeof(float));

    /* pass 1: exponentials and sums */
    for (int j = 0; j < on_size; j++)
    {
        for (int i = 0; i < (in_size & -4); i += 4)
        {
            int index = j * in_size + i;
            float32x4_t out = vexpq10_f32(vsubq_f32(vld1q_f32(input_ptr + index), vld1q_f32(maxarray_ptr + i)));
            float32x4_t sum = vaddq_f32(vld1q_f32(sum_array_ptr + i), out);
            vst1q_f32(output_ptr + index, out);
            vst1q_f32(sum_array_ptr + i, sum);
        }
        /* scalar tail uses the exact exp() — slightly more accurate than
         * the vector approximation, matching the original behavior */
        for (int i = in_size & ~3; i < in_size; i++)
        {
            int index = j * in_size + i;
            output_ptr[index] = exp(input_ptr[index] - maxarray_ptr[i]);
            sum_array_ptr[i] += output_ptr[index];
        }
    }

    /* pass 2: normalize */
    for (int j = 0; j < on_size; j++)
        for (int l = 0; l < in_size; l++)
        {
            int index = j * in_size + l;
            output_ptr[index] /= sum_array_ptr[l];
        }
}

/* No per-node state to set up. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* No per-node state to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Nothing to prepare before the first run. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Execute softmax along param->axis for an FP32 tensor.
 * The tensor is viewed as [out_size, on_size, in_size] with on_size the
 * softmax axis; each of the out_size slices is processed independently.
 *
 * Fixes over the original:
 *  - dims[] copy is bounded at 4 entries (the old loop could overrun the
 *    stack buffer if dim_num > 4);
 *  - malloc results are checked;
 *  - the dead int8 branch that allocated and immediately freed two
 *    scratch buffers is removed (int8 is rejected by score()). */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct softmax_param* softmax_param = (struct softmax_param*)ir_node->op.param_mem;

    int element_size = input_tensor->elem_size;

    int dims[4] = {0};
    int dim_num = (int)input_tensor->dim_num <= 4 ? (int)input_tensor->dim_num : 4;
    for (int i = 0; i < dim_num; i++)
    {
        dims[i] = input_tensor->dims[i];
    }

    int axis = softmax_param->axis;

    /* product of dims before the axis */
    int out_size = 1;
    for (int i = 0; i < axis; i++)
    {
        out_size *= dims[i];
    }
    /* product of dims after the axis */
    int in_size = 1;
    for (int i = axis + 1; i < dim_num; i++)
    {
        in_size *= dims[i];
    }
    int on_size = dims[axis];

    uint8_t* input = input_tensor->data;
    uint8_t* output = output_tensor->data;

    float* max_array = (float*)malloc(in_size * sizeof(float));
    float* sum_array = (float*)malloc(in_size * sizeof(float));
    if (max_array == NULL || sum_array == NULL)
    {
        free(max_array); /* free(NULL) is a no-op */
        free(sum_array);
        return -1;
    }

    int on_in_size = on_size * in_size;

    /* int8/uint8 is not handled here: score() only accepts FP32 tensors. */

    for (int i = 0; i < out_size; i++)
    {
        /* byte offset of slice i */
        int img_base = i * on_in_size * element_size;

        GetMaxArray((float*)(input + img_base), max_array, in_size, on_size, exec_graph->num_thread);
        GetOutResult((float*)(input + img_base), (float*)(output + img_base), max_array, sum_array, in_size,
                     on_size, exec_graph->num_thread);
    }

    free(max_array);
    free(sum_array);

    return 0;
}

/* This implementation only supports FP32; bow out for other dtypes. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    /* todo support uint8 */
    if (input_tensor->data_type != TENGINE_DT_FP32)
        return 0;

    return OPS_SCORE_BEST;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_softmax_hcl_arm_op(void* arg)
{
    return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}

int unregister_softmax_hcl_arm_op(void* arg)
{
    return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops);
}
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified in place (carry normalization) by this helper. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates the double-buffered grid A[2][Nz][Ny][Nx]
 * and the 13 coefficient grids, runs the tiled (PLUTO/CLooG-generated)
 * time-space loop nest TESTS times, and reports the best wall time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* NOTE(review): Nx/Ny/Nz/Nt stay uninitialized when fewer than 4
   * arguments are supplied — the harness is expected to always pass
   * "Nx Ny Nz Nt"; confirm before reusing this driver. The +8 pads
   * 4 halo layers on each side for the order-4 stencil reach. */
  if (argc > 3)
  {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  /* NOTE(review): malloc results are unchecked throughout; large sizes
   * will crash rather than fail gracefully. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;   /* NOTE(review): assigned but never inspected */
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  /* NOTE(review): srand is left commented out, so rand() uses the default
   * seed — runs are reproducible but not seed-configurable. */
  // srand(42);
  for (i = 1; i < Nz; i++)
  {
    for (j = 1; j < Ny; j++)
    {
      for (k = 1; k < Nx; k++)
      {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* (glibc <stdc-predef.h> boilerplate comment inlined by the
     * source-to-source tool elided here — no code was contained in it.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    /* Auto-generated diamond-tiled loop nest: t1/t2 walk time-tile wave-
     * fronts (t2 parallelized over threads), t3/t4 the Y/X tiles, t5 the
     * time steps within a tile, and t6/t7/t8 the z/y/x points (shifted by
     * 4*t5). Do not hand-edit the bounds — regenerate with PLUTO. */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(8*t1+Nx+7,128)),floord(16*t2+Nx+3,128)),floord(8*t3+Nx-5,128)),floord(16*t1-16*t2+Nz+Nx+5,128));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),32*t4+30);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                    lbv=max(128*t4,4*t5+4);
                    ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 25-point update: center term plus 12 symmetric
                       * coefficient terms reaching 1..4 points along each
                       * axis; ping-pong buffering on the time index. */
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] =
                        (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)])
                        + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)])))
                        + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1])))
                        + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)])))
                        + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2])))
                        + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)])))
                        + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3])))
                        + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)])))
                        + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)])))
                        + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
solver-omp.c
#include "heat.h"

#define NB 8

#define min(a,b) ( ((a) < (b)) ? (a) : (b) )

/*
 * Blocked Jacobi solver: one iteration step
 *
 * Reads u, writes the smoothed grid into utmp, and returns the sum of
 * squared point-wise differences (the convergence residual).
 * Blocking: one row-band per OpenMP thread (nbx = thread count), no
 * column blocking (nby = 1). Safe to parallelize because utmp is
 * disjoint from u.
 */
double relax_jacobi (double *u, double *utmp, unsigned sizex, unsigned sizey)
{
    double diff, sum=0.0;
    int nbx, bx, nby, by;

    /* NOTE(review): sizex/sizey are unsigned, so expressions such as
     * sizex-2 mix signedness with the int block indices — fine for the
     * realistic sizes here, but worth confirming for tiny grids. */
    nbx = omp_get_max_threads();//NB;
    bx = sizex/nbx + ((sizex%nbx) ? 1 : 0);//sizex/nbx;
    nby = 1;//NB;
    by = sizey/nby;
    #pragma omp parallel for reduction(+:sum) private(diff)
    for (int ii=0; ii<nbx; ii++) {
        for (int jj=0; jj<nby; jj++) {
            for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++) {
                for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
                    utmp[i*sizey+j]= 0.25 * (u[ i*sizey     + (j-1) ]+  // left
                                             u[ i*sizey     + (j+1) ]+  // right
                                             u[ (i-1)*sizey + j     ]+  // top
                                             u[ (i+1)*sizey + j     ]); // bottom
                    diff = utmp[i*sizey+j] - u[i*sizey + j];
                    sum += diff * diff;
                }
            }
        }
    }

    return sum;
}

/*
 * Blocked Red-Black solver: one iteration step
 *
 * Updates u in place in two sweeps: "red" blocks (checkerboard parity 0)
 * then "black" blocks (parity 1). Within a sweep, blocks of the same
 * color share no horizontally-adjacent cells across jj, which is what
 * makes the per-sweep parallel loops valid with the NB x NB blocking.
 * Returns the accumulated squared residual of both sweeps.
 */
double relax_redblack (double *u, unsigned sizex, unsigned sizey)
{
    double unew, diff, sum=0.0;
    int nbx, bx, nby, by;
    int lsw;

    /* active configuration: fixed NB x NB blocking */
    //*
    nbx = NB;
    bx = sizex/nbx;
    nby = NB;
    by = sizey/nby;
    // */
    /* alternative (disabled): one row-band per thread, as in relax_jacobi
    nbx = omp_get_max_threads();
    bx = sizex/nbx + ((sizex%nbx) ? 1 : 0);
    nby = 1;
    by = sizey/nby;
    // */
    // Computing "Red" blocks
    #pragma omp parallel for reduction(+:sum) private(diff, lsw)
    for (int ii=0; ii<nbx; ii++) {
        lsw = ii%2;
        for (int jj=lsw; jj<nby; jj=jj+2) {
            for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++) {
                for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
                    unew= 0.25 * (    u[ i*sizey	+ (j-1) ]+  // left
                                      u[ i*sizey	+ (j+1) ]+  // right
                                      u[ (i-1)*sizey	+ j     ]+  // top
                                      u[ (i+1)*sizey	+ j     ]); // bottom
                    diff = unew - u[i*sizey+ j];
                    sum += diff * diff;
                    u[i*sizey+j]=unew;
                }
            }
        }
    }

    #pragma omp parallel for reduction(+:sum) private(diff, lsw)
    // Computing "Black" blocks
    for (int ii=0; ii<nbx; ii++) {
        lsw = (ii+1)%2;
        for (int jj=lsw; jj<nby; jj=jj+2) {
            for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++) {
                for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
                    unew= 0.25 * (    u[ i*sizey	+ (j-1) ]+  // left
                                      u[ i*sizey	+ (j+1) ]+  // right
                                      u[ (i-1)*sizey	+ j     ]+  // top
                                      u[ (i+1)*sizey	+ j     ]); // bottom
                    diff = unew - u[i*sizey+ j];
                    sum += diff * diff;
                    u[i*sizey+j]=unew;
                }
            }
        }
    }

    return sum;
}

/*
 * Blocked Gauss-Seidel solver: one iteration step
 *
 * Updates u in place and returns the squared residual.
 *
 * NOTE(review): parallelizing the row-band loop while reading/writing u
 * in place means neighboring bands race on their boundary rows — the
 * result is nondeterministic and no longer a sequential Gauss-Seidel
 * ordering (it degenerates toward a chaotic/Jacobi-like relaxation).
 * This appears intentional for the lab exercise; confirm before reuse.
 */
double relax_gauss (double *u, unsigned sizex, unsigned sizey)
{
    double unew, diff, sum=0.0;
    int nbx, bx, nby, by;

    /* disabled configuration: fixed NB x NB blocking
    nbx = NB;
    bx = sizex/nbx;
    nby = NB;
    by = sizey/nby;
    // */
    /* active configuration: one row-band per thread */
    //*
    nbx = omp_get_max_threads();
    bx = sizex/nbx + ((sizex%nbx) ? 1 : 0);
    nby = 1;
    by = sizey/nby;
    // */
    #pragma omp parallel for reduction(+:sum) private(diff)
    for (int ii=0; ii<nbx; ii++)
        for (int jj=0; jj<nby; jj++)
            for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++)
                for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
                    unew= 0.25 * (    u[ i*sizey	+ (j-1) ]+  // left
                                      u[ i*sizey	+ (j+1) ]+  // right
                                      u[ (i-1)*sizey	+ j     ]+  // top
                                      u[ (i+1)*sizey	+ j     ]); // bottom
                    diff = unew - u[i*sizey+ j];
                    sum += diff * diff;
                    u[i*sizey+j]=unew;
                }

    return sum;
}
lis_precision_vec.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <stdio.h> #include <math.h> #ifdef USE_SSE2 #include <emmintrin.h> #endif #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" #ifdef USE_QUAD_PRECISION #undef __FUNC__ #define __FUNC__ "lis_quad_malloc" LIS_INT lis_quad_malloc(LIS_QUAD_PTR *a, LIS_INT n) { double *ah, *al; LIS_DEBUG_FUNC_IN; ah = (double *)lis_malloc(2*n*sizeof(double),"lis_quad_malloc::ah"); al = &ah[n]; a->hi = ah; a->lo = al; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_quad_free" LIS_INT lis_quad_free(LIS_QUAD_PTR *a) { LIS_DEBUG_FUNC_IN; lis_free(a->hi); LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_axpyex_mmm" LIS_INT lis_vector_axpyex_mmm(LIS_QUAD_PTR alpha, LIS_VECTOR vx, LIS_VECTOR vy) { LIS_INT i,n,is,ie,nprocs,my_rank; LIS_QUAD_PTR bx,aa; LIS_QUAD_PTR ax; LIS_SCALAR *x,*xl,*y,*yl; LIS_QUAD_DECLAR; LIS_DEBUG_FUNC_IN; n = vx->n; x = vx->value; y = vy->value; xl = vx->value_lo; yl = vy->value_lo; bx.hi = &vx->work[0]; bx.lo = &vx->work[2]; aa.hi = &vx->work[4]; aa.lo = &vx->work[6]; ax.hi = &vx->work[8]; ax.lo = &vx->work[9]; #ifndef USE_FMA2_SSE2 #pragma cdir nodep #pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) for(i=0; i<n; i++) { LIS_QUAD_FMA(y[i],yl[i],y[i],yl[i],alpha.hi[0],alpha.lo[0],x[i],xl[i]); } #else #ifdef _OPENMP nprocs = omp_get_max_threads(); #else nprocs = 1; #endif aa.hi[0] = aa.hi[1] = alpha.hi[0]; aa.lo[0] = aa.lo[1] = alpha.lo[0]; #ifdef _OPENMP #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh,is,ie,my_rank) #endif { #ifdef _OPENMP my_rank = omp_get_thread_num(); #else my_rank = 0; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); for(i=is;i<ie-1;i+=2) { LIS_QUAD_FMA2_SSE2(y[i],yl[i],y[i],yl[i],aa.hi[0],aa.lo[0],x[i],xl[i]); } for(;i<ie;i++) { 
LIS_QUAD_FMA_SSE2(y[i],yl[i],y[i],yl[i],alpha.hi[0],alpha.lo[0],x[i],xl[i]); } } #endif LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_axpyzex_mmmm" LIS_INT lis_vector_axpyzex_mmmm(LIS_QUAD_PTR alpha, LIS_VECTOR vx, LIS_VECTOR vy, LIS_VECTOR vz) { LIS_INT i,n,is,ie,nprocs,my_rank; LIS_QUAD_PTR bx,aa; LIS_QUAD_PTR ax; LIS_SCALAR *x,*y,*z; LIS_SCALAR *xl,*yl,*zl; LIS_QUAD_DECLAR; LIS_DEBUG_FUNC_IN; n = vx->n; x = vx->value; y = vy->value; z = vz->value; xl = vx->value_lo; yl = vy->value_lo; zl = vz->value_lo; bx.hi = &vx->work[0]; bx.lo = &vx->work[2]; aa.hi = &vx->work[4]; aa.lo = &vx->work[6]; ax.hi = &vx->work[8]; ax.lo = &vx->work[9]; #ifndef USE_FMA2_SSE2 #pragma cdir nodep #pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) for(i=0; i<n; i++) { LIS_QUAD_FMA(z[i],zl[i],y[i],yl[i],alpha.hi[0],alpha.lo[0],x[i],xl[i]); } #else #ifdef _OPENMP nprocs = omp_get_max_threads(); #else nprocs = 1; #endif aa.hi[0] = aa.hi[1] = alpha.hi[0]; aa.lo[0] = aa.lo[1] = alpha.lo[0]; #ifdef _OPENMP #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh,is,ie,my_rank) #endif { #ifdef _OPENMP my_rank = omp_get_thread_num(); #else my_rank = 0; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); for(i=is;i<ie-1;i+=2) { LIS_QUAD_FMA2_SSE2(z[i],zl[i],y[i],yl[i],aa.hi[0],aa.lo[0],x[i],xl[i]); } for(;i<ie;i++) { LIS_QUAD_FMA_SSE2(z[i],zl[i],y[i],yl[i],alpha.hi[0],alpha.lo[0],x[i],xl[i]); } } #endif LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_xpayex_mmm" LIS_INT lis_vector_xpayex_mmm(LIS_VECTOR vx, LIS_QUAD_PTR alpha, LIS_VECTOR vy) { LIS_INT i,n,is,ie,nprocs,my_rank; LIS_QUAD_PTR by,aa; LIS_QUAD_PTR ay; LIS_SCALAR *x,*y,*xl,*yl; LIS_QUAD_DECLAR; LIS_DEBUG_FUNC_IN; n = vx->n; x = vx->value; y = vy->value; xl = vx->value_lo; yl = vy->value_lo; by.hi = &vx->work[0]; by.lo = &vx->work[2]; aa.hi = &vx->work[4]; aa.lo = &vx->work[6]; ay.hi = &vx->work[8]; ay.lo = 
&vx->work[9];
    #ifndef USE_FMA2_SSE2
    #pragma cdir nodep
    #pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
    /* y = y + alpha*x, fused multiply-add in double-double arithmetic */
    for(i=0; i<n; i++)
    {
        LIS_QUAD_FMA(y[i],yl[i],x[i],xl[i],alpha.hi[0],alpha.lo[0],y[i],yl[i]);
    }
    #else
    #ifdef _OPENMP
    nprocs = omp_get_max_threads();
    #else
    nprocs = 1;
    #endif
    /* broadcast alpha into both SSE2 lanes of the work area */
    aa.hi[0] = aa.hi[1] = alpha.hi[0];
    aa.lo[0] = aa.lo[1] = alpha.lo[0];
    #ifdef _OPENMP
    #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh,is,ie,my_rank)
    #endif
    {
        #ifdef _OPENMP
        my_rank = omp_get_thread_num();
        #else
        my_rank = 0;
        #endif
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        /* 2-way unrolled SSE2 kernel; any odd element is handled by the tail loop */
        for(i=is;i<ie-1;i+=2)
        {
            LIS_QUAD_FMA2_SSE2(y[i],yl[i],x[i],xl[i],aa.hi[0],aa.lo[0],y[i],yl[i]);
        }
        for(;i<ie;i++)
        {
            LIS_QUAD_FMA_SSE2(y[i],yl[i],x[i],xl[i],alpha.hi[0],alpha.lo[0],y[i],yl[i]);
        }
    }
    #endif
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_dotex_mmm"
/* val = <vx,vy> computed in double-double (quad) precision.
 * Per-thread partial sums live in lis_vec_tmp (gt), padded by
 * LIS_VEC_TMP_PADD, and are reduced serially afterwards; under MPI the
 * cross-rank sum uses the user-defined LIS_MPI_MSUM reduction. */
LIS_INT lis_vector_dotex_mmm(LIS_VECTOR vx, LIS_VECTOR vy, LIS_QUAD_PTR *val)
{
    LIS_INT i,n;
    LIS_SCALAR *x,*y,*xl,*yl;
    LIS_QUAD_PTR dotm2,xy2,dotm,tmpm,xy;
    #ifdef _OPENMP
    LIS_INT is,ie,nprocs,my_rank;
    LIS_SCALAR *gt;
    #endif
    #ifdef USE_MPI
    MPI_Comm comm;
    #endif
    LIS_QUAD_DECLAR;    /* temporaries used by the LIS_QUAD_* macros */

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    y  = vy->value;
    xl = vx->value_lo;  /* low-order words of the double-double values */
    yl = vy->value_lo;
    /* carve quad-precision temporaries out of the vector work array */
    dotm2.hi = &vx->work[0];
    dotm2.lo = &vx->work[2];
    xy2.hi   = &vx->work[4];
    xy2.lo   = &vx->work[6];
    dotm.hi  = &vx->work[8];
    dotm.lo  = &vx->work[9];
    tmpm.hi  = &vx->work[10];
    tmpm.lo  = &vx->work[11];
    xy.hi    = &vx->work[12];
    xy.lo    = &vx->work[13];
    #ifndef NO_ERROR_CHECK
    if( n!=vy->n )
    {
        LIS_SETERR(LIS_ERR_ILL_ARG,"length of vector x and y is not equal\n");
        return LIS_ERR_ILL_ARG;
    }
    #endif
    #ifdef USE_MPI
    comm = vx->comm;
    #endif
    #ifdef _OPENMP
    gt     = lis_vec_tmp;   /* per-thread partials, padded to avoid false sharing */
    nprocs = omp_get_max_threads();
    #ifndef USE_SSE2
    #pragma omp parallel private(i,p1,p2,tq,bhi,blo,chi,clo,sh,th,sl,tl,eh,el,is,ie,my_rank)
    #else
    #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh,is,ie,my_rank)
    #endif
    {
        my_rank = omp_get_thread_num();
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        #ifndef USE_FMA2_SSE2
        gt[my_rank*LIS_VEC_TMP_PADD] = gt[my_rank*LIS_VEC_TMP_PADD+1] = 0.0;
        #pragma cdir nodep
        for(i=is;i<ie;i++)
        {
            LIS_QUAD_FMA(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],y[i],yl[i],x[i],xl[i]);
        }
        #else
        /* two SSE2 accumulator lanes: slots [0]/[1] and [2]/[3] */
        gt[my_rank*LIS_VEC_TMP_PADD  ] = gt[my_rank*LIS_VEC_TMP_PADD+1] = 0.0;
        gt[my_rank*LIS_VEC_TMP_PADD+2] = gt[my_rank*LIS_VEC_TMP_PADD+3] = 0.0;
        #ifdef USE_VEC_COMP
        #pragma cdir nodep
        #endif
        for(i=is;i<ie-1;i+=2)
        {
            LIS_QUAD_FMA2_SSE2(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+2],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+2],y[i],yl[i],x[i],xl[i]);
        }
        /* fold the two lanes together, then finish any odd remainder */
        LIS_QUAD_ADD_SSE2(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+2],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD+3]);
        for(;i<ie;i++)
        {
            LIS_QUAD_FMA_SSE2(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],y[i],yl[i],x[i],xl[i]);
        }
        #endif
    }
    /* serial reduction of the per-thread partial sums */
    dotm.hi[0] = dotm.lo[0] = 0.0;
    for(i=0;i<nprocs;i++)
    {
        #ifndef USE_SSE2
        LIS_QUAD_ADD(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],gt[i*LIS_VEC_TMP_PADD],gt[i*LIS_VEC_TMP_PADD+1]);
        #else
        LIS_QUAD_ADD_SSE2(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],gt[i*LIS_VEC_TMP_PADD],gt[i*LIS_VEC_TMP_PADD+1]);
        #endif
    }
    #else
    #ifndef USE_FMA2_SSE2
    dotm.hi[0] = dotm.lo[0] = 0.0;
    #pragma cdir nodep
    for(i=0;i<n;i++)
    {
        LIS_QUAD_FMA(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],y[i],yl[i],x[i],xl[i]);
    }
    #else
    dotm2.hi[0] = dotm2.hi[1] = 0.0;
    dotm2.lo[0] = dotm2.lo[1] = 0.0;
    for(i=0;i<n-1;i+=2)
    {
        LIS_QUAD_FMA2_SSE2(dotm2.hi[0],dotm2.lo[0],dotm2.hi[0],dotm2.lo[0],y[i],yl[i],x[i],xl[i]);
    }
    LIS_QUAD_ADD_SSE2(dotm.hi[0],dotm.lo[0],dotm2.hi[0],dotm2.lo[0],dotm2.hi[1],dotm2.lo[1]);
    for(;i<n;i++)
    {
        LIS_QUAD_FMA_SSE2(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],y[i],yl[i],x[i],xl[i]);
    }
    #endif
    #endif
    #ifdef USE_MPI
    /* global double-double sum across ranks (user-defined MPI reduction) */
    MPI_Allreduce(dotm.hi,tmpm.hi,1,LIS_MPI_MSCALAR,LIS_MPI_MSUM,comm);
    val->hi[0] = tmpm.hi[0];
    val->lo[0] = tmpm.lo[0];
    #else
    val->hi[0] = dotm.hi[0];
    val->lo[0] = dotm.lo[0];
    #endif
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_nrm2ex_mm"
/* val = ||vx||_2 in double-double precision: the sum of squares uses the
 * fused square-add (FSA) kernels, finished with one quad-precision sqrt. */
LIS_INT lis_vector_nrm2ex_mm(LIS_VECTOR vx, LIS_QUAD_PTR *val)
{
    LIS_INT i,n;
    LIS_SCALAR *x,*xl;
    LIS_QUAD_PTR dotm2,xy2,dotm,tmpm,xx;
    #ifdef _OPENMP
    LIS_INT is,ie,nprocs,my_rank;
    LIS_SCALAR *gt;
    #endif
    #ifdef USE_MPI
    MPI_Comm comm;
    #endif
    LIS_QUAD_DECLAR;

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    xl = vx->value_lo;
    /* quad-precision temporaries carved out of the vector work array */
    dotm2.hi = &vx->work[0];
    dotm2.lo = &vx->work[2];
    xy2.hi   = &vx->work[4];
    xy2.lo   = &vx->work[6];
    dotm.hi  = &vx->work[8];
    dotm.lo  = &vx->work[9];
    tmpm.hi  = &vx->work[10];
    tmpm.lo  = &vx->work[11];
    xx.hi    = &vx->work[12];
    xx.lo    = &vx->work[13];
    #ifdef USE_MPI
    comm = vx->comm;
    #endif
    #ifdef _OPENMP
    gt     = lis_vec_tmp;
    nprocs = omp_get_max_threads();
    #ifndef USE_SSE2
    #pragma omp parallel private(i,is,ie,my_rank,p1,p2,tq,bhi,blo,chi,clo,sh,eh,sl,el,th,tl)
    #else
    #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh,is,ie,my_rank)
    #endif
    {
        my_rank = omp_get_thread_num();
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        #ifndef USE_FMA2_SSE2
        gt[my_rank*LIS_VEC_TMP_PADD] = gt[my_rank*LIS_VEC_TMP_PADD+1] = 0.0;
        #pragma cdir nodep
        for(i=is;i<ie;i++)
        {
            LIS_QUAD_FSA(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],x[i],xl[i]);
        }
        #else
        /* two SSE2 accumulator lanes, folded together below */
        gt[my_rank*LIS_VEC_TMP_PADD  ] = gt[my_rank*LIS_VEC_TMP_PADD+1] = 0.0;
        gt[my_rank*LIS_VEC_TMP_PADD+2] = gt[my_rank*LIS_VEC_TMP_PADD+3] = 0.0;
        #ifdef USE_VEC_COMP
        #pragma cdir nodep
        #endif
        for(i=is;i<ie-1;i+=2)
        {
            LIS_QUAD_FSA2_SSE2(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+2],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+2],x[i],xl[i]);
        }
        LIS_QUAD_ADD_SSE2(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+2],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD+3]);
        for(;i<ie;i++)
        {
            LIS_QUAD_FSA_SSE2(gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],gt[my_rank*LIS_VEC_TMP_PADD],gt[my_rank*LIS_VEC_TMP_PADD+1],x[i],xl[i]);
        }
        #endif
    }
    /* serial reduction of the per-thread partials */
    dotm.hi[0] = dotm.lo[0] = 0.0;
    for(i=0;i<nprocs;i++)
    {
        #ifndef USE_SSE2
        LIS_QUAD_ADD(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],gt[i*LIS_VEC_TMP_PADD],gt[i*LIS_VEC_TMP_PADD+1]);
        #else
        LIS_QUAD_ADD_SSE2(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],gt[i*LIS_VEC_TMP_PADD],gt[i*LIS_VEC_TMP_PADD+1]);
        #endif
    }
    #else
    #ifndef USE_FMA2_SSE2
    dotm.hi[0] = dotm.lo[0] = 0.0;
    #pragma cdir nodep
    for(i=0;i<n;i++)
    {
        LIS_QUAD_FSA(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],x[i],xl[i]);
    }
    #else
    dotm2.hi[0] = dotm2.hi[1] = 0.0;
    dotm2.lo[0] = dotm2.lo[1] = 0.0;
    for(i=0;i<n-1;i+=2)
    {
        LIS_QUAD_FSA2_SSE2(dotm2.hi[0],dotm2.lo[0],dotm2.hi[0],dotm2.lo[0],x[i],xl[i]);
    }
    LIS_QUAD_ADD_SSE2(dotm.hi[0],dotm.lo[0],dotm2.hi[0],dotm2.lo[0],dotm2.hi[1],dotm2.lo[1]);
    for(;i<n;i++)
    {
        LIS_QUAD_FSA_SSE2(dotm.hi[0],dotm.lo[0],dotm.hi[0],dotm.lo[0],x[i],xl[i]);
    }
    #endif
    #endif
    #ifdef USE_MPI
    /* global sum, then quad-precision square root of the result */
    MPI_Allreduce(dotm.hi,tmpm.hi,1,LIS_MPI_MSCALAR,LIS_MPI_MSUM,comm);
    #ifndef USE_SSE2
    LIS_QUAD_SQRT(val->hi[0],val->lo[0],tmpm.hi[0],tmpm.lo[0]);
    #else
    LIS_QUAD_SQRT_SSE2(val->hi[0],val->lo[0],tmpm.hi[0],tmpm.lo[0]);
    #endif
    #else
    #ifndef USE_SSE2
    LIS_QUAD_SQRT(val->hi[0],val->lo[0],dotm.hi[0],dotm.lo[0]);
    #else
    LIS_QUAD_SQRT_SSE2(val->hi[0],val->lo[0],dotm.hi[0],dotm.lo[0]);
    #endif
    #endif
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_copyex_mm"
/* vy = vx, copying both the high and the low words (DD -> DD). */
LIS_INT lis_vector_copyex_mm(LIS_VECTOR vx, LIS_VECTOR vy)
{
    LIS_INT i,n;
    LIS_SCALAR *x,*xl,*y,*yl;

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    y  = vy->value;
    xl = vx->value_lo;
    yl = vy->value_lo;
    #ifdef USE_VEC_COMP
    #pragma cdir nodep
    #endif
    #ifdef _OPENMP
    #pragma
omp parallel for private(i)
    #endif
    /* element-wise copy of both high and low parts (DD format) */
    for(i=0; i<n; i++)
    {
        y[i]  = x[i];
        yl[i] = xl[i];
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_copyex_nm"
/* vy(double-double) = vx(double): promote a working-precision vector to
 * quad precision; the low-order words are cleared to zero. */
LIS_INT lis_vector_copyex_nm(LIS_VECTOR vx, LIS_VECTOR vy)
{
    LIS_INT i,n;
    LIS_SCALAR *x,*y,*yl;

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    y  = vy->value;
    yl = vy->value_lo;  /* low-order words of the destination */
    #ifdef USE_VEC_COMP
    #pragma cdir nodep
    #endif
    #ifdef _OPENMP
    #pragma omp parallel for private(i)
    #endif
    for(i=0; i<n; i++)
    {
        y[i]  = x[i];
        yl[i] = 0.0;    /* a plain double has no low-order residue */
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_copyex_mn"
/* vy(double) = vx(double-double): demote by keeping only the high parts. */
LIS_INT lis_vector_copyex_mn(LIS_VECTOR vx, LIS_VECTOR vy)
{
    LIS_INT i,n;
    LIS_SCALAR *x,*y;

    LIS_DEBUG_FUNC_IN;

    n = vx->n;
    x = vx->value;
    y = vy->value;
    #ifdef USE_VEC_COMP
    #pragma cdir nodep
    #endif
    #ifdef _OPENMP
    #pragma omp parallel for private(i)
    #endif
    for(i=0; i<n; i++)
    {
        y[i] = x[i];
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_scaleex_nm"
/* vx = alpha * vx, where vx is double-double and alpha a plain double. */
LIS_INT lis_vector_scaleex_nm(LIS_SCALAR alpha, LIS_VECTOR vx)
{
    LIS_INT i,n,is,ie,nprocs,my_rank;
    LIS_SCALAR *aa;
    LIS_SCALAR *x,*xl;
    LIS_QUAD_DECLAR;    /* temporaries used by the LIS_QUAD_* macros */

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    xl = vx->value_lo;
    aa = vx->work;      /* scratch used to duplicate alpha for SSE2 lanes */
    #ifndef USE_FMA2_SSE2
    #pragma cdir nodep
    #ifndef USE_SSE2
    #pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,eh,sl,el,th,tl)
    #else
    #pragma omp parallel for private(i,bh,ch,sh,th,bl,sl,tl,p1,p2,t0,t1,t2,is,ie,my_rank)
    #endif
    for(i=0; i<n; i++)
    {
        LIS_QUAD_MULD(x[i],xl[i],x[i],xl[i],alpha);
    }
    #else
    #ifdef _OPENMP
    nprocs = omp_get_max_threads();
    #else
    nprocs = 1;
    #endif
    aa[0] = aa[1] = alpha;  /* broadcast alpha into both SSE2 lanes */
    #ifdef _OPENMP
    #ifndef USE_SSE2
    #pragma omp parallel private(i,is,ie,my_rank,p1,p2,tq,bhi,blo,chi,clo,sh,eh,sl,el,th,tl)
    #else
    #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh,is,ie,my_rank)
    #endif
    #endif
    {
        #ifdef _OPENMP
        my_rank = omp_get_thread_num();
        #else
        my_rank = 0;
        #endif
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        /* 2-way unrolled SSE2 kernel; odd remainder handled below */
        for(i=is;i<ie-1;i+=2)
        {
            LIS_QUAD_MULD2_SSE2(x[i],xl[i],x[i],xl[i],aa[0]);
        }
        for(;i<ie;i++)
        {
            LIS_QUAD_MULD_SSE2(x[i],xl[i],x[i],xl[i],alpha);
        }
    }
    #endif
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_scaleex_mm"
/* vx = alpha * vx with a double-double scalar alpha. */
LIS_INT lis_vector_scaleex_mm(LIS_QUAD_PTR alpha, LIS_VECTOR vx)
{
    LIS_INT i,n,is,ie,nprocs,my_rank;
    LIS_QUAD_PTR aa;
    LIS_SCALAR *x,*xl;
    LIS_QUAD_DECLAR;

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    xl = vx->value_lo;
    aa.hi = &vx->work[0];
    aa.lo = &vx->work[2];
    #ifndef USE_FMA2_SSE2
    #pragma cdir nodep
    #pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,eh,sl,el,th,tl)
    for(i=0; i<n; i++)
    {
        LIS_QUAD_MUL(x[i],xl[i],x[i],xl[i],alpha.hi[0],alpha.lo[0]);
    }
    #else
    #ifdef _OPENMP
    nprocs = omp_get_max_threads();
    #else
    nprocs = 1;
    #endif
    /* broadcast both words of alpha into the two SSE2 lanes */
    aa.hi[0] = aa.hi[1] = alpha.hi[0];
    aa.lo[0] = aa.lo[1] = alpha.lo[0];
    #ifdef _OPENMP
    #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh,is,ie,my_rank)
    #endif
    {
        #ifdef _OPENMP
        my_rank = omp_get_thread_num();
        #else
        my_rank = 0;
        #endif
        LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
        for(i=is;i<ie-1;i+=2)
        {
            LIS_QUAD_MUL2_SSE2(x[i],xl[i],x[i],xl[i],aa.hi[0],aa.lo[0]);
        }
        for(;i<ie;i++)
        {
            LIS_QUAD_MUL_SSE2(x[i],xl[i],x[i],xl[i],aa.hi[0],aa.lo[0]);
        }
    }
    #endif
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_set_allex_nm"
/* vx(:) = alpha: fill with a plain double; low parts become zero. */
LIS_INT lis_vector_set_allex_nm(LIS_SCALAR alpha, LIS_VECTOR vx)
{
    LIS_INT i,n;
    LIS_SCALAR *x,*xl;

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    xl = vx->value_lo;
    #ifdef USE_VEC_COMP
    #pragma cdir nodep
    #endif
    #ifdef _OPENMP
    #pragma omp parallel for private(i)
    #endif
    for(i=0; i<n; i++)
    {
        x[i]  = alpha;
        xl[i] = 0.0;
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_vector_reciprocalex_m"
/* vx(i) = 1 / vx(i), element-wise, in double-double arithmetic. */
LIS_INT lis_vector_reciprocalex_m(LIS_VECTOR vx)
{
    LIS_INT i,n;
    LIS_SCALAR *x,*xl;
    LIS_SCALAR one_hi,one_lo;
    LIS_QUAD_DECLAR;

    LIS_DEBUG_FUNC_IN;

    n  = vx->n;
    x  = vx->value;
    xl = vx->value_lo;
    one_hi = 1.0;
    one_lo = 0.0;   /* the constant 1.0 in double-double form: (1.0, 0.0) */
    #ifdef USE_VEC_COMP
    #pragma cdir nodep
    #endif
    #ifdef _OPENMP
    #ifndef USE_SSE2
    #pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,eh,sl,el,th,tl)
    #else
    #pragma omp parallel private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
    #endif
    #endif
    for(i=0; i<n; i++)
    {
        #ifndef USE_SSE2
        LIS_QUAD_DIV(x[i],xl[i],one_hi,one_lo,x[i],xl[i]);
        #else
        LIS_QUAD_DIV_SSE2(x[i],xl[i],one_hi,one_lo,x[i],xl[i]);
        #endif
    }
    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#ifdef USE_MPI
#undef __FUNC__
#define __FUNC__ "lis_mpi_msum"
/* User-defined MPI reduction: element-wise double-double sum
 * (inoutvec[i] += invec[i]).  Signature matches MPI_User_function so it
 * can be registered with MPI_Op_create. */
void lis_mpi_msum(LIS_QUAD *invec, LIS_QUAD *inoutvec, LIS_INT *len, MPI_Datatype *datatype)
{
    LIS_INT i;
    LIS_QUAD_DECLAR;

    LIS_DEBUG_FUNC_IN;

    for(i=0;i<*len;i++)
    {
        #ifndef USE_SSE2
        LIS_QUAD_ADD(inoutvec[i].hi,inoutvec[i].lo,inoutvec[i].hi,inoutvec[i].lo,invec[i].hi,invec[i].lo);
        #else
        LIS_QUAD_ADD_SSE2(inoutvec[i].hi,inoutvec[i].lo,inoutvec[i].hi,inoutvec[i].lo,invec[i].hi,invec[i].lo);
        #endif
    }
    LIS_DEBUG_FUNC_OUT;
}

#undef __FUNC__
#define __FUNC__ "lis_send_recv_mp"
/* Halo exchange for the double-double vector X: for each neighbour the
 * exported entries are packed into ws as [inum high words | inum low
 * words] and sent; received data is unpacked into the ghost region
 * starting at import_index[0]+pad. */
LIS_INT lis_send_recv_mp(LIS_COMMTABLE commtable, LIS_VECTOR X)
{
    LIS_INT neib,i,is,inum,neibpetot,k,pad;
    LIS_SCALAR *ws,*wr;
    LIS_SCALAR *x,*xl;
    LIS_INT *iw,err;

    LIS_DEBUG_FUNC_IN;

    neibpetot = commtable->neibpetot;
    ws        = commtable->ws;      /* send buffer */
    wr        = commtable->wr;      /* receive buffer */
    pad       = commtable->pad;
    x         = X->value;
    xl        = X->value_lo;

    for(neib=0;neib<neibpetot;neib++)
    {
        is   = commtable->export_ptr[neib];
        inum = commtable->export_ptr[neib+1] - is;
        /* pack: high words first, then the matching low words */
        for(i=0;i<inum;i++)
        {
            ws[is*2+i]      = x[commtable->export_index[is+i]];
            ws[is*2+inum+i] = xl[commtable->export_index[is+i]];
        }
        MPI_Isend(&ws[is*2],inum*2,MPI_DOUBLE,commtable->neibpe[neib],0,commtable->comm,&commtable->req1[neib]);
    }
    for(neib=0;neib<neibpetot;neib++)
    {
        is   = commtable->import_ptr[neib];
        inum = commtable->import_ptr[neib+1] - is;
        MPI_Irecv(&wr[is*2],inum*2,MPI_DOUBLE,commtable->neibpe[neib],0,commtable->comm,&commtable->req2[neib]);
    }
    MPI_Waitall(neibpetot, commtable->req2, commtable->sta2);

    /* unpack the received data into the ghost region of X */
    k = commtable->import_index[0]+pad;
    for(neib=0;neib<neibpetot;neib++)
    {
        is   = commtable->import_ptr[neib];
        inum = commtable->import_ptr[neib+1] - is;
        for(i=0;i<inum;i++)
        {
            x[k]    = wr[is*2+i];
            xl[k++] = wr[is*2+inum+i];
        }
    }
    MPI_Waitall(neibpetot, commtable->req1, commtable->sta1);

    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_reduce_mp"
/* Reverse halo exchange: send the ghost entries of X back to their
 * owners, who accumulate them into the owned entries
 * (x[export_index] += received value) in double-double arithmetic. */
LIS_INT lis_reduce_mp(LIS_COMMTABLE commtable, LIS_VECTOR X)
{
    LIS_INT neib,i,k,is,inum,neibpetot,pad;
    LIS_SCALAR *x,*xl;
    LIS_SCALAR *ws,*wr;
    LIS_QUAD_DECLAR;

    LIS_DEBUG_FUNC_IN;

    neibpetot = commtable->neibpetot;
    ws        = commtable->ws;
    wr        = commtable->wr;
    pad       = commtable->pad;
    x         = X->value;
    xl        = X->value_lo;

    for(neib=0;neib<neibpetot;neib++)
    {
        is   = commtable->import_ptr[neib];
        inum = commtable->import_ptr[neib+1] - is;
        /* pack the ghost values: [inum high words | inum low words] */
        for(i=0;i<inum;i++)
        {
            wr[is*2+i]      = x[commtable->import_index[is+i]+pad];
            wr[is*2+inum+i] = xl[commtable->import_index[is+i]+pad];
        }
        MPI_Isend(&wr[is*2],inum*2,MPI_DOUBLE,commtable->neibpe[neib],0,commtable->comm,&commtable->req1[neib]);
    }
    for(neib=0;neib<neibpetot;neib++)
    {
        is   = commtable->export_ptr[neib];
        inum = commtable->export_ptr[neib+1] - is;
        MPI_Irecv(&ws[is*2],inum*2,MPI_DOUBLE,commtable->neibpe[neib],0,commtable->comm,&commtable->req2[neib]);
    }
    MPI_Waitall(neibpetot, commtable->req2, commtable->sta2);
    for(neib=0;neib<neibpetot;neib++)
    {
        is   = commtable->export_ptr[neib];
        inum = commtable->export_ptr[neib+1] - is;
        for(i=0;i<inum;i++)
        {
            /*x[commtable->export_index[i]] += ws[i];*/
            #ifndef USE_SSE2
            LIS_QUAD_ADD(x[commtable->export_index[is+i]],xl[commtable->export_index[is+i]],x[commtable->export_index[is+i]],xl[commtable->export_index[is+i]],ws[is*2+i],ws[is*2+inum+i]);
            #else
            LIS_QUAD_ADD_SSE2(x[commtable->export_index[is+i]],xl[commtable->export_index[is+i]],x[commtable->export_index[is+i]],xl[commtable->export_index[is+i]],ws[is*2+i],ws[is*2+inum+i]);
            #endif
        }
    }
    MPI_Waitall(neibpetot, commtable->req1, commtable->sta1);

    LIS_DEBUG_FUNC_OUT;
    return LIS_SUCCESS;
}
#endif
#endif
/* ==== concatenated file boundary: taskloop_simd_misc_messages.c ==== */
// RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp taskloop simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd foo void test_no_clause() { int i; #pragma omp taskloop simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp taskloop simd' must be a for loop}} #pragma omp taskloop simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp taskloop simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd foo bar for (i = 0; i < 16; ++i) ; // expected-error@+1 {{directive '#pragma omp taskloop simd' cannot contain more than one 'nogroup' clause}} #pragma 
omp taskloop simd nogroup nogroup for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd; for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp parallel #pragma omp taskloop simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} #pragma omp taskloop simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp taskloop simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp taskloop simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected 
')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel #pragma omp taskloop simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // 
expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp taskloop simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp taskloop simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp taskloop simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp taskloop simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd private(int) for (i = 0; i 
< 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp taskloop simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp taskloop simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp taskloop simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp taskloop simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp taskloop simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp taskloop simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // 
expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp taskloop simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp taskloop simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp taskloop simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp taskloop simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp taskloop simd simdlen(64) safelen(8) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp taskloop simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp taskloop simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}} #pragma omp taskloop simd for (__int128 ii = 0; ii < 10; ii++) { c[ii] = a[ii] + b[ii]; } } void test_nontemporal() { int i; // omp45-error@+1 
{{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 2 {{expected expression}} #pragma omp taskloop simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected expression}} #pragma omp taskloop simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected expression}} #pragma omp taskloop simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} omp50-error@+1 {{expected variable name}} #pragma omp taskloop simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp taskloop simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp taskloop simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 
{{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp taskloop simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp taskloop simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp taskloop simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp taskloop simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp taskloop simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} 
#pragma omp taskloop simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp taskloop simd'}} #pragma omp taskloop simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; }
/* ==== concatenated file boundary: GB_binop__pair_fp32.c ==== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__pair_fp32
// A.*B function (eWiseMult):       GB_AemultB__pair_fp32
// A*D function (colscale):         GB_AxD__pair_fp32
// D*A function (rowscale):         GB_DxB__pair_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__pair_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__pair_fp32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__pair_fp32
// C=scalar+B                       (none)
// C=scalar+B'                      (none)
// C=A+scalar                       (none)
// C=A'+scalar                      (none)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = 1

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty for PAIR: the operator never reads its inputs)
#define GB_GETA(aij,Ax,pA) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: PAIR always produces 1, regardless of x and y
#define GB_BINOP(z, x, y, i, j) \
    z = 1 ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_FP32 || GxB_NO_PAIR_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__pair_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__pair_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__pair_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns); emitted by the
    // code generator and intentionally left in place.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__pair_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__pair_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__pair_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice arrays, freed by GB_FREE_ALL after the template completes
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__pair_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const 
int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
GB_unop__lnot_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__lnot_fp32_fp32)
// op(A') function:  GB (_unop_tran__lnot_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of a float (result is 0.0f or 1.0f)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (identity here: both A and C are float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = !(z != 0) ;           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__lnot_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__lnot_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_wait.c
//------------------------------------------------------------------------------
// GB_wait:  finish all pending computations on a single matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// CALLS:     GB_builder

// This function is typically called via the GB_MATRIX_WAIT(A) macro, except
// for GB_assign, GB_subassign, and GB_mxm.

// The matrix A has zombies and/or pending tuples placed there by
// GrB_setElement, GrB_*assign, or GB_mxm.  Zombies must now be deleted, and
// pending tuples must now be assembled together and added into the matrix.
// The indices in A might also be jumbled; if so, they are sorted now.

// When the function returns, all pending tuples and zombies have been
// deleted.  This is true even if the function fails due to lack of memory
// (in that case, the matrix is cleared as well).

// If A is hypersparse, the time taken is at most O(nnz(A) + t log t), where t
// is the number of pending tuples in A, and nnz(A) includes both zombies and
// live entries.  There is no O(m) or O(n) time component, if A is m-by-n.
// If the number of non-empty vectors of A grows too large, then A can be
// converted to non-hypersparse.

// If A is non-hypersparse, then O(n) is added in the worst case, to prune
// zombies and to update the vector pointers for A.

// If the method is successful, it does an OpenMP flush just before returning.
#include "GB_select.h"
#include "GB_add.h"
#include "GB_Pending.h"
#include "GB_build.h"
#include "GB_jappend.h"

// free all workspace and all matrices built here (used on any error path)
#define GB_FREE_ALL                 \
{                                   \
    GB_FREE (&W, W_size) ;          \
    GB_phbix_free (A) ;             \
    GB_phbix_free (T) ;             \
    GB_phbix_free (S) ;             \
    GB_phbix_free (A1) ;            \
}

GB_PUBLIC
GrB_Info GB_wait                // finish all pending computations
(
    GrB_Matrix A,               // matrix with pending computations
    const char *name,           // name of the matrix
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_void *W = NULL ; size_t W_size = 0 ;
    // T, S, and A1 use static headers; they need no header allocation
    struct GB_Matrix_opaque T_header, A1_header, S_header ;
    GrB_Matrix T = GB_clear_static_header (&T_header) ;
    GrB_Matrix A1 = NULL ;
    GrB_Matrix S = GB_clear_static_header (&S_header) ;
    GrB_Info info = GrB_SUCCESS ;
    ASSERT_MATRIX_OK (A, "A to wait", GB_FLIP (GB0)) ;

    if (GB_IS_FULL (A) || GB_IS_BITMAP (A))
    {
        // full and bitmap matrices never have any pending work
        ASSERT (!GB_ZOMBIES (A)) ;
        ASSERT (!GB_JUMBLED (A)) ;
        ASSERT (!GB_PENDING (A)) ;
        // ensure the matrix is written to memory
        #pragma omp flush
        return (GrB_SUCCESS) ;
    }

    // only sparse and hypersparse matrices can have pending work
    ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ;
    ASSERT (GB_ZOMBIES_OK (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (GB_PENDING_OK (A)) ;

    //--------------------------------------------------------------------------
    // get the zombie and pending count, and burble if work needs to be done
    //--------------------------------------------------------------------------

    int64_t nzombies = A->nzombies ;
    int64_t npending = GB_Pending_n (A) ;
    const bool A_iso = A->iso ;
    if (nzombies > 0 || npending > 0 || A->jumbled)
    {
        GB_BURBLE_MATRIX (A, "(%swait:%s " GBd " %s, " GBd " pending%s) ",
            A_iso ? "iso " : "", name, nzombies,
            (nzombies == 1) ? "zombie" : "zombies", npending,
            A->jumbled ? ", jumbled" : "") ;
    }

    //--------------------------------------------------------------------------
    // determine the max # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // ensure A is not shallow
    //--------------------------------------------------------------------------

    int64_t anz_orig = GB_nnz (A) ;     // includes zombies at this point
    int64_t asize = A->type->size ;
    ASSERT (!GB_is_shallow (A)) ;

    //--------------------------------------------------------------------------
    // check if A only needs to be unjumbled
    //--------------------------------------------------------------------------

    if (npending == 0 && nzombies == 0)
    {
        // A is not conformed, so the sparsity structure of A is not modified.
        // That is, if A has no pending tuples and no zombies, but is just
        // jumbled, then it stays sparse or hypersparse.
        GB_OK (GB_unjumble (A, Context)) ;
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // assemble the pending tuples into T
    //--------------------------------------------------------------------------

    int64_t tnz = 0 ;
    if (npending > 0)
    {

        //----------------------------------------------------------------------
        // construct a new hypersparse matrix T with just the pending tuples
        //----------------------------------------------------------------------

        // T has the same type as A->type, which can differ from the type of
        // the pending tuples, A->Pending->type.  The Pending->op can be NULL
        // (an implicit SECOND function), or it can be any accum operator.  The
        // z=accum(x,y) operator can have any types, and it does not have to be
        // associative.  T is constructed as iso if A is iso.

        GB_void *S_input = (A_iso) ? ((GB_void *) A->x) : NULL ;
        GrB_Type stype = (A_iso) ? A->type : A->Pending->type ;
        info = GB_builder
        (
            T,                      // create T using a static header
            A->type,                // T->type = A->type
            A->vlen,                // T->vlen = A->vlen
            A->vdim,                // T->vdim = A->vdim
            A->is_csc,              // T->is_csc = A->is_csc
            &(A->Pending->i),       // iwork_handle, becomes T->i on output
            &(A->Pending->i_size),
            &(A->Pending->j),       // jwork_handle, free on output
            &(A->Pending->j_size),
            &(A->Pending->x),       // Swork_handle, free on output
            &(A->Pending->x_size),
            A->Pending->sorted,     // tuples may or may not be sorted
            false,                  // there might be duplicates; look for them
            A->Pending->nmax,       // size of Pending->[ijx] arrays
            true,                   // is_matrix: unused
            NULL, NULL, S_input,    // original I,J,S tuples, not used here
            A_iso,                  // pending tuples are iso if A is iso
            npending,               // # of tuples
            A->Pending->op,         // dup operator for assembling duplicates,
                                    // NULL if A is iso
            stype,                  // type of Pending->x
            Context
        ) ;

        //----------------------------------------------------------------------
        // free pending tuples
        //----------------------------------------------------------------------

        // The tuples have been converted to T, which is more compact, and
        // duplicates have been removed.  The following work needs to be done
        // even if the builder fails.

        // GB_builder frees A->Pending->j and A->Pending->x.  If successful,
        // A->Pending->i is now T->i.  Otherwise A->Pending->i is freed.  In
        // both cases, A->Pending->i is NULL.
        ASSERT (A->Pending->i == NULL) ;
        ASSERT (A->Pending->j == NULL) ;
        ASSERT (A->Pending->x == NULL) ;

        // free the list of pending tuples
        GB_Pending_free (&(A->Pending)) ;
        ASSERT (!GB_PENDING (A)) ;

        ASSERT_MATRIX_OK (A, "A after moving pending tuples to T", GB0) ;

        //----------------------------------------------------------------------
        // check the status of the builder
        //----------------------------------------------------------------------

        // Finally check the status of the builder.  The pending tuples must
        // be freed (just above), whether or not the builder is successful.
        if (info != GrB_SUCCESS)
        {
            // out of memory in GB_builder
            GB_FREE_ALL ;
            return (info) ;
        }

        ASSERT_MATRIX_OK (T, "T = hypersparse matrix of pending tuples", GB0) ;
        ASSERT (GB_IS_HYPERSPARSE (T)) ;
        ASSERT (!GB_ZOMBIES (T)) ;
        ASSERT (!GB_JUMBLED (T)) ;
        ASSERT (!GB_PENDING (T)) ;

        tnz = GB_nnz (T) ;
        ASSERT (tnz > 0) ;
    }

    //--------------------------------------------------------------------------
    // delete zombies
    //--------------------------------------------------------------------------

    // A zombie is an entry A(i,j) in the matrix that has been marked for
    // deletion, but hasn't been deleted yet.  It is marked by "negating" its
    // index i, replacing it with GB_FLIP(i).

    // TODO: pass tnz to GB_selector, to pad the reallocated A matrix

    ASSERT_MATRIX_OK (A, "A before zombies removed", GB0) ;

    if (nzombies > 0)
    {
        // remove all zombies from A
        GB_OK (GB_selector (NULL /* A in-place */, GB_NONZOMBIE_opcode, NULL,
            false, A, 0, NULL, Context)) ;
        ASSERT (A->nzombies == (anz_orig - GB_nnz (A))) ;
        A->nzombies = 0 ;
    }

    ASSERT_MATRIX_OK (A, "A after zombies removed", GB0) ;

    // all the zombies are gone, and pending tuples are now in T
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_PENDING (A)) ;

    //--------------------------------------------------------------------------
    // unjumble the matrix
    //--------------------------------------------------------------------------

    GB_OK (GB_unjumble (A, Context)) ;

    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_JUMBLED (A)) ;
    ASSERT (!GB_PENDING (A)) ;

    //--------------------------------------------------------------------------
    // check for pending tuples
    //--------------------------------------------------------------------------

    if (npending == 0)
    {
        // conform A to its desired sparsity structure and return result
        info = GB_conform (A, Context) ;
        #pragma omp flush
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // check for quick transplant
    //--------------------------------------------------------------------------

    int64_t anz = GB_nnz (A) ;
    if (anz == 0)
    {
        // A has no entries so just transplant T into A, then free T and
        // conform A to its desired hypersparsity.
        info = GB_transplant_conform (A, A->type, &T, Context) ;
        #pragma omp flush
        return (info) ;
    }

    //--------------------------------------------------------------------------
    // determine the method for A = A+T
    //--------------------------------------------------------------------------

    // If anz > 0, T is hypersparse, even if A is a GrB_Vector
    ASSERT (GB_IS_HYPERSPARSE (T)) ;
    ASSERT (tnz > 0) ;
    ASSERT (T->nvec > 0) ;
    ASSERT (A->nvec > 0) ;

    // tjfirst = first vector in T
    int64_t tjfirst = T->h [0] ;
    int64_t anz0 = 0 ;
    int64_t kA = 0 ;
    int64_t jlast ;

    int64_t *restrict Ap = A->p ;
    int64_t *restrict Ah = A->h ;
    int64_t *restrict Ai = A->i ;
    GB_void *restrict Ax = (GB_void *) A->x ;

    int64_t anvec = A->nvec ;

    // anz0 = nnz (A0) = nnz (A (:, 0:tjfirst-1)), the region not modified by T
    if (A->h != NULL)
    {
        // find tjfirst in A->h
        int64_t pright = anvec - 1 ;
        bool found ;
        GB_SPLIT_BINARY_SEARCH (tjfirst, A->h, kA, pright, found) ;
        // A->h [0 ... kA-1] excludes vector tjfirst.  The list
        // A->h [kA ... anvec-1] includes tjfirst.
        ASSERT (kA >= 0 && kA <= anvec) ;
        ASSERT (GB_IMPLIES (kA > 0 && kA < anvec, A->h [kA-1] < tjfirst)) ;
        ASSERT (GB_IMPLIES (found, A->h [kA] == tjfirst)) ;
        jlast = (kA > 0) ? A->h [kA-1] : (-1) ;
    }
    else
    {
        // A is sparse: vector k is just column/row k
        kA = tjfirst ;
        jlast = tjfirst - 1 ;
    }

    // anz1 = nnz (A1) = nnz (A (:, kA:end)), the region modified by T
    anz0 = A->p [kA] ;
    int64_t anz1 = anz - anz0 ;
    bool ignore ;

    // A + T will have anz_new entries
    int64_t anz_new = anz + tnz ;       // must have at least this space

    if (2 * anz1 < anz0)
    {

        //----------------------------------------------------------------------
        // append new tuples to A
        //----------------------------------------------------------------------

        // A is growing incrementally.  It splits into two parts: A = [A0 A1].
        // where A0 = A (:, 0:kA-1) and A1 = A (:, kA:end).  The
        // first part (A0 with anz0 = nnz (A0) entries) is not modified.  The
        // second part (A1, with anz1 = nnz (A1) entries) overlaps with T.
        // If anz1 is zero, or small compared to anz0, then it is faster to
        // leave A0 unmodified, and to update just A1.

        // TODO: if A also had zombies, GB_selector could pad A so that
        // GB_nnz_max (A) is equal to anz + tnz.

        // make sure A has enough space for the new tuples
        if (anz_new > GB_nnz_max (A))
        {
            // double the size if not enough space
            GB_OK (GB_ix_realloc (A, 2 * anz_new, Context)) ;
            // Ai and Ax may have moved; refresh the cached pointers
            Ai = A->i ;
            Ax = (GB_void *) A->x ;
        }

        //----------------------------------------------------------------------
        // T = A1 + T
        //----------------------------------------------------------------------

        if (anz1 > 0)
        {

            //------------------------------------------------------------------
            // extract A1 = A (:, kA:end) as a shallow copy
            //------------------------------------------------------------------

            // A1 = [0, A (:, kA:end)], hypersparse with same dimensions as A
            A1 = GB_clear_static_header (&A1_header) ;
            GB_OK (GB_new (&A1, true,   // hyper, static header
                A->type, A->vlen, A->vdim, GB_Ap_malloc, A->is_csc,
                GxB_HYPERSPARSE, GB_ALWAYS_HYPER, anvec - kA, Context)) ;

            // the A1->i and A1->x content are shallow copies of A(:,kA:end).
            // They are not allocated pointers, but point to space inside
            // Ai and Ax.
            A1->x = (void *) (Ax + (A_iso ? 0 : (asize * anz0))) ;
            A1->x_size = (A_iso ? 1 : anz1) * asize ;
            A1->x_shallow = true ;
            A1->i = Ai + anz0 ;
            A1->i_size = anz1 * sizeof (int64_t) ;
            A1->i_shallow = true ;
            A1->iso = A_iso ;       // OK

            // fill the column A1->h and A1->p with A->h and A->p, shifted
            int64_t *restrict A1p = A1->p ;
            int64_t *restrict A1h = A1->h ;
            int64_t a1nvec = 0 ;
            for (int64_t k = kA ; k < anvec ; k++)
            {
                // get A (:,k)
                int64_t pA_start = Ap [k] ;
                int64_t pA_end = Ap [k+1] ;
                if (pA_end > pA_start)
                {
                    // add this column to A1 if A (:,k) is not empty
                    int64_t j = GBH (Ah, k) ;
                    A1p [a1nvec] = pA_start - anz0 ;
                    A1h [a1nvec] = j ;
                    a1nvec++ ;
                }
            }

            // finalize A1
            A1p [a1nvec] = anz1 ;
            A1->nvec = a1nvec ;
            A1->nvec_nonempty = a1nvec ;
            A1->magic = GB_MAGIC ;

            ASSERT_MATRIX_OK (A1, "A1 slice for GB_wait", GB0) ;

            //------------------------------------------------------------------
            // S = A1 + T, with no operator or mask
            //------------------------------------------------------------------

            GB_OK (GB_add (S, A->type, A->is_csc, NULL, 0, 0, &ignore, A1, T,
                NULL, Context)) ;

            ASSERT_MATRIX_OK (S, "S = A1+T", GB0) ;

            // free A1 and T
            GB_phbix_free (T) ;
            GB_phbix_free (A1) ;

            //------------------------------------------------------------------
            // replace T with S
            //------------------------------------------------------------------

            T = S ;
            S = NULL ;
            tnz = GB_nnz (T) ;

            //------------------------------------------------------------------
            // remove A1 from the vectors of A, if A is hypersparse
            //------------------------------------------------------------------

            if (A->h != NULL)
            {
                A->nvec = kA ;
            }
        }

        //----------------------------------------------------------------------
        // append T to the end of A0
        //----------------------------------------------------------------------

        const int64_t *restrict Tp = T->p ;
        const int64_t *restrict Th = T->h ;
        const int64_t *restrict Ti = T->i ;
        int64_t tnvec = T->nvec ;

        anz = anz0 ;
        int64_t anz_last = anz ;

        int nthreads = GB_nthreads (tnz, chunk, nthreads_max) ;

        // append the indices and values of T to the end of A
        GB_memcpy (Ai + anz, Ti, tnz * sizeof (int64_t), nthreads) ;
        if (!A_iso)
        {
            // iso matrices share a single value; only copy values otherwise
            const GB_void *restrict Tx = (GB_void *) T->x ;
            GB_memcpy (Ax + anz * asize, Tx, tnz * asize, nthreads) ;
        }

        // append the vectors of T to the end of A
        for (int64_t k = 0 ; k < tnvec ; k++)
        {
            int64_t j = Th [k] ;
            ASSERT (j >= tjfirst) ;
            anz += (Tp [k+1] - Tp [k]) ;
            GB_OK (GB_jappend (A, j, &jlast, anz, &anz_last, Context)) ;
        }

        GB_jwrapup (A, jlast, anz) ;
        ASSERT (anz == anz_new) ;

        // need to recompute the # of non-empty vectors in GB_conform
        A->nvec_nonempty = -1 ;     // recomputed just below

        ASSERT_MATRIX_OK (A, "A after GB_wait:append", GB0) ;

        GB_phbix_free (T) ;

        // conform A to its desired sparsity structure
        info = GB_conform (A, Context) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A = A+T
        //----------------------------------------------------------------------

        // The update is not incremental since most of A is changing.  Just do
        // a single parallel add: S=A+T, free T, and then transplant S back
        // into A.  The nzmax of A is tight, with no room for future
        // incremental growth.

        // FUTURE:: if GB_add could tolerate zombies in A, then the initial
        // prune of zombies can be skipped.

        GB_OK (GB_add (S, A->type, A->is_csc, NULL, 0, 0, &ignore, A, T, NULL,
            Context)) ;
        GB_phbix_free (T) ;
        ASSERT_MATRIX_OK (S, "S after GB_wait:add", GB0) ;
        info = GB_transplant_conform (A, A->type, &S, Context) ;
    }

    //--------------------------------------------------------------------------
    // flush the matrix and return result
    //--------------------------------------------------------------------------

    #pragma omp flush
    return (info) ;
}
taskloop-4.c
/* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */

/* Shared state: u[k] records the iteration count of the k-th task created by
   a taskloop; v counts how many tasks executed at least one iteration.  */
int u[64], v;

/* Run FN (a taskloop kernel) over the iteration space [a, b) with step c and
   grainsize/num_tasks argument d.  On return, *num_tasks is the number of
   tasks that ran, *min_iters/*max_iters the smallest/largest per-task
   iteration count, and the return value the total number of iterations.  */
__attribute__((noinline, noclone)) int
test (int a, int b, int c, int d, void (*fn) (int, int, int, int),
      int *num_tasks, int *min_iters, int *max_iters)
{
  int i, t = 0;
  /* Reset the shared counters before running the kernel.  */
  __builtin_memset (u, 0, sizeof u);
  v = 0;
  fn (a, b, c, d);
  *min_iters = 0;
  *max_iters = 0;
  *num_tasks = v;
  if (v)
    {
      *min_iters = u[0];
      *max_iters = u[0];
      t = u[0];
      for (i = 1; i < v; i++)
	{
	  if (*min_iters > u[i])
	    *min_iters = u[i];
	  if (*max_iters < u[i])
	    *max_iters = u[i];
	  t += u[i];
	}
    }
  return t;
}

/* Taskloop with a grainsize(d) clause; j and k are firstprivate, so every
   task starts with j == 0 and atomically claims its own slot k in u[] on its
   first iteration, then keeps u[k] equal to its iteration count.  */
void
grainsize (int a, int b, int c, int d)
{
  int i, j = 0, k = 0;
  #pragma omp taskloop firstprivate (j, k) grainsize(d)
  for (i = a; i < b; i += c)
    {
      if (j == 0)
	{
	  #pragma omp atomic capture
	  k = v++;
	  if (k >= 64)
	    __builtin_abort ();
	}
      u[k] = ++j;
    }
}

/* Same kernel, but with a num_tasks(d) clause instead of grainsize.  */
void
num_tasks (int a, int b, int c, int d)
{
  int i, j = 0, k = 0;
  #pragma omp taskloop firstprivate (j, k) num_tasks(d)
  for (i = a; i < b; i += c)
    {
      if (j == 0)
	{
	  #pragma omp atomic capture
	  k = v++;
	  if (k >= 64)
	    __builtin_abort ();
	}
      u[k] = ++j;
    }
}

int
main ()
{
  #pragma omp parallel
  #pragma omp single
    {
      int min_iters, max_iters, ntasks;
      /* If grainsize is present, # of task loop iters is
	 >= grainsize && < 2 * grainsize,
	 unless # of loop iterations is smaller than grainsize.  */
      if (test (0, 79, 1, 17, grainsize, &ntasks, &min_iters, &max_iters)
	  != 79 || min_iters < 17 || max_iters >= 17 * 2)
	__builtin_abort ();
      if (test (-49, 2541, 7, 28, grainsize, &ntasks, &min_iters, &max_iters)
	  != 370 || min_iters < 28 || max_iters >= 28 * 2)
	__builtin_abort ();
      /* 7 iterations total, below the grainsize of 15: one task runs all.  */
      if (test (7, 21, 2, 15, grainsize, &ntasks, &min_iters, &max_iters)
	  != 7 || ntasks != 1 || min_iters != 7 || max_iters != 7)
	__builtin_abort ();
      /* If num_tasks is present, # of task loop iters is
	 min (# of loop iters, num_tasks).  */
      if (test (-51, 2500, 48, 9, num_tasks, &ntasks, &min_iters, &max_iters)
	  != 54 || ntasks != 9)
	__builtin_abort ();
      if (test (0, 25, 2, 17, num_tasks, &ntasks, &min_iters, &max_iters)
	  != 13 || ntasks != 13)
	__builtin_abort ();
    }
  return 0;
}
pixel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP IIIII X X EEEEE L % % P P I X X E L % % PPPP I X EEE L % % P I X X E L % % P IIIII X X EEEEE LLLLL % % % % MagickCore Methods to Import/Export Pixels % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C h a n n e l M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelChannelMap() acquires a pixel component map. 
%
%  The format of the AcquirePixelChannelMap() method is:
%
%      PixelChannelMap *AcquirePixelChannelMap(void)
%
*/
MagickExport PixelChannelMap *AcquirePixelChannelMap(void)
{
  PixelChannelMap
    *channel_map;

  ssize_t
    i;

  /*
    Allocate one map entry per possible pixel channel; a failed allocation is
    fatal.
  */
  channel_map=(PixelChannelMap *) AcquireQuantumMemory(MaxPixelChannels,
    sizeof(*channel_map));
  if (channel_map == (PixelChannelMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_map,0,MaxPixelChannels*sizeof(*channel_map));
  /*
    Initialize to the identity mapping: entry i maps to pixel channel i.
  */
  for (i=0; i < MaxPixelChannels; i++)
    channel_map[i].channel=(PixelChannel) i;
  return(channel_map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C h a n n e l M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelChannelMap() clones a pixel component map.
%
%  The format of the ClonePixelChannelMap() method is:
%
%      PixelChannelMap *ClonePixelChannelMap(PixelChannelMap *channel_map)
%
%  A description of each parameter follows:
%
%    o channel_map: the pixel component map.
%
*/
MagickExport PixelChannelMap *ClonePixelChannelMap(PixelChannelMap *channel_map)
{
  PixelChannelMap
    *clone_map;

  assert(channel_map != (PixelChannelMap *) NULL);
  clone_map=AcquirePixelChannelMap();
  if (clone_map == (PixelChannelMap *) NULL)
    return((PixelChannelMap *) NULL);
  /*
    Copy all MaxPixelChannels entries from the source map.
  */
  (void) memcpy(clone_map,channel_map,MaxPixelChannels*
    sizeof(*channel_map));
  return(clone_map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l I n f o                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelInfo() makes a duplicate of the given pixel info structure, or if
%  pixel info is NULL, a new one.
%
%  The format of the ClonePixelInfo method is:
%
%      PixelInfo *ClonePixelInfo(const PixelInfo *pixel)
%
%  A description of each parameter follows:
%
%    o pixel: the pixel info.
%
*/
MagickExport PixelInfo *ClonePixelInfo(const PixelInfo *pixel)
{
  PixelInfo
    *clone;

  /*
    Allocate a new PixelInfo and copy the source structure into it by value.
    Allocation failure is fatal, matching the other Acquire* helpers here.
  */
  clone=(PixelInfo *) AcquireMagickMemory(sizeof(*clone));
  if (clone == (PixelInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *clone=(*pixel);
  return(clone);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n f o r m P i x e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConformPixelInfo() ensures the pixel conforms with the colorspace and alpha
%  attribute of the image.
%
%  The format of the ConformPixelInfo method is:
%
%      void ConformPixelInfo(Image *image,const PixelInfo *source,
%        PixelInfo *destination,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source pixel info.
%
%    o destination: the destination pixel info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void ConformPixelInfo(Image *image,const PixelInfo *source,
  PixelInfo *destination,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(destination != (const PixelInfo *) NULL);
  /*
    Start from a by-value copy of the source, then adjust it (and the image)
    so the two agree on colorspace and alpha.
  */
  *destination=(*source);
  if (image->colorspace == CMYKColorspace)
    {
      /*
        Image is CMYK but the pixel is in an sRGB-compatible space: convert
        the pixel to CMYK.
      */
      if (IssRGBCompatibleColorspace(destination->colorspace) != MagickFalse)
        ConvertRGBToCMYK(destination);
    }
  else
    if (destination->colorspace == CMYKColorspace)
      {
        /*
          Pixel is CMYK but the image is sRGB-compatible: convert the pixel
          back to RGB.
        */
        if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
          ConvertCMYKToRGB(destination);
      }
  /*
    A non-gray background on a grayscale image forces the image to sRGB so
    the color can be represented.  Note this mutates the image, not the pixel.
  */
  if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    If the pixel carries alpha but the image does not, enable an opaque alpha
    channel on the image so the pixel's alpha is representable.
  */
  if ((destination->alpha_trait != UndefinedPixelTrait) &&
      (image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e c o d e P i x e l G a m m a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DecodePixelGamma() applies the expansive power-law nonlinearity to the
%  pixel.
%
%  The format of the DecodePixelGamma method is:
%
%      MagickRealType DecodePixelGamma(const MagickRealType pixel)
%
%  A description of each parameter follows:
%
%    o pixel: the pixel.
%
*/

/*
  DecodeGamma() evaluates x^2.4 as x*x^(7/5): the mantissa's power is
  computed with a 9-term Chebyshev-style series (the term[] recurrence is
  T(n+1) = 2*t*T(n) - T(n-1)) and the binary exponent's power comes from a
  small (2^k)^(7/5) lookup table recombined via ldexp().
*/
static inline double DecodeGamma(const double x)
{
  div_t
    quotient;

  double
    p,
    term[9];

  int
    exponent;

  static const double coefficient[] =  /* terms for x^(7/5), x=1.5 */
  {
    1.7917488588043277509,
    0.82045614371976854984,
    0.027694100686325412819,
    -0.00094244335181762134018,
    0.000064355540911469709545,
    -5.7224404636060757485e-06,
    5.8767669437311184313e-07,
    -6.6139920053589721168e-08,
    7.9323242696227458163e-09
  };

  static const double powers_of_two[] =  /* (2^x)^(7/5) */
  {
    1.0,
    2.6390158215457883983,
    6.9644045063689921093,
    1.8379173679952558018e+01,
    4.8502930128332728543e+01
  };

  /*
    Compute x^2.4 == x*x^(7/5) == pow(x,2.4).
  */
  term[0]=1.0;
  /* map the mantissa from [0.5,1) onto [-1,1) for the series */
  term[1]=4.0*frexp(x,&exponent)-3.0;
  term[2]=2.0*term[1]*term[1]-term[0];
  term[3]=2.0*term[1]*term[2]-term[1];
  term[4]=2.0*term[1]*term[3]-term[2];
  term[5]=2.0*term[1]*term[4]-term[3];
  term[6]=2.0*term[1]*term[5]-term[4];
  term[7]=2.0*term[1]*term[6]-term[5];
  term[8]=2.0*term[1]*term[7]-term[6];
  p=coefficient[0]*term[0]+coefficient[1]*term[1]+coefficient[2]*term[2]+
    coefficient[3]*term[3]+coefficient[4]*term[4]+coefficient[5]*term[5]+
    coefficient[6]*term[6]+coefficient[7]*term[7]+coefficient[8]*term[8];
  /* split the exponent into 5*quot+rem so the table covers rem in [0,5) */
  quotient=div(exponent-1,5);
  if (quotient.rem < 0)
    {
      quotient.quot-=1;
      quotient.rem+=5;
    }
  return(x*ldexp(powers_of_two[quotient.rem]*p,7*quotient.quot));
}

MagickExport MagickRealType DecodePixelGamma(const MagickRealType pixel)
{
  /*
    sRGB decoding: linear segment below the breakpoint, gamma-2.4 power law
    above (IEC 61966-2-1).
  */
  if (pixel <= (0.0404482362771076*QuantumRange))
    return(pixel/12.92f);
  return((MagickRealType) (QuantumRange*DecodeGamma((double) (QuantumScale*
    pixel+0.055)/1.055)));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C h a n n e l M a p                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelChannelMap() deallocates memory associated with the pixel
%  channel map.
%
%  The format of the DestroyPixelChannelMap() method is:
%
%      PixelChannelMap *DestroyPixelChannelMap(PixelChannelMap *channel_map)
%
%  A description of each parameter follows:
%
%    o channel_map: the pixel component map.
%
*/
MagickExport PixelChannelMap *DestroyPixelChannelMap(
  PixelChannelMap *channel_map)
{
  assert(channel_map != (PixelChannelMap *) NULL);
  /*
    Free the map exactly once and return the NULL that
    RelinquishMagickMemory() yields so callers can clear their reference.
    (The previous code called RelinquishMagickMemory() twice in a row; that
    was harmless only because the second call received the NULL returned by
    the first -- a latent double-free pattern.)
  */
  return((PixelChannelMap *) RelinquishMagickMemory(channel_map));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   E n c o d e P i x e l G a m m a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EncodePixelGamma() cancels any nonlinearity in the pixel.
%
%  The format of the EncodePixelGamma method is:
%
%      MagickRealType EncodePixelGamma(const MagickRealType pixel)
%
%  A description of each parameter follows:
%
%    o pixel: the pixel.
%
*/

/*
  EncodeGamma() evaluates x^(1/2.4) == x^(5/12): the mantissa's power is
  computed with a 9-term Chebyshev-style series and the binary exponent's
  power comes from a (2^k)^(5/12) lookup table recombined via ldexp().
*/
static inline double EncodeGamma(const double x)
{
  div_t
    quotient;

  double
    p,
    term[9];

  int
    exponent;

  static const double coefficient[] =  /* Chebyshev poly: x^(5/12), x=1.5 */
  {
    1.1758200232996901923,
    0.16665763094889061230,
    -0.0083154894939042125035,
    0.00075187976780420279038,
    -0.000083240178519391795367,
    0.000010229209410070008679,
    -1.3400466409860246e-06,
    1.8333422241635376682e-07,
    -2.5878596761348859722e-08
  };

  static const double powers_of_two[] =  /* (2^N)^(5/12) */
  {
    1.0,
    1.3348398541700343678,
    1.7817974362806785482,
    2.3784142300054420538,
    3.1748021039363991669,
    4.2378523774371812394,
    5.6568542494923805819,
    7.5509945014535482244,
    1.0079368399158985525e1,
    1.3454342644059433809e1,
    1.7959392772949968275e1,
    2.3972913230026907883e1
  };

  /*
    Compute x^(1/2.4) == x^(5/12) == pow(x,1.0/2.4).
  */
  term[0]=1.0;
  /* map the mantissa from [0.5,1) onto [-1,1) for the series */
  term[1]=4.0*frexp(x,&exponent)-3.0;
  term[2]=2.0*term[1]*term[1]-term[0];
  term[3]=2.0*term[1]*term[2]-term[1];
  term[4]=2.0*term[1]*term[3]-term[2];
  term[5]=2.0*term[1]*term[4]-term[3];
  term[6]=2.0*term[1]*term[5]-term[4];
  term[7]=2.0*term[1]*term[6]-term[5];
  term[8]=2.0*term[1]*term[7]-term[6];
  p=coefficient[0]*term[0]+coefficient[1]*term[1]+coefficient[2]*term[2]+
    coefficient[3]*term[3]+coefficient[4]*term[4]+coefficient[5]*term[5]+
    coefficient[6]*term[6]+coefficient[7]*term[7]+coefficient[8]*term[8];
  /* split the exponent into 12*quot+rem so the table covers rem in [0,12) */
  quotient=div(exponent-1,12);
  if (quotient.rem < 0)
    {
      quotient.quot-=1;
      quotient.rem+=12;
    }
  return(ldexp(powers_of_two[quotient.rem]*p,5*quotient.quot));
}

MagickExport MagickRealType EncodePixelGamma(const MagickRealType pixel)
{
  /*
    sRGB encoding: linear segment below the breakpoint, 1/2.4 power law
    above (IEC 61966-2-1).
  */
  if (pixel <= (0.0031306684425005883*QuantumRange))
    return(12.92f*pixel);
  return((MagickRealType) QuantumRange*(1.055*EncodeGamma((double)
    QuantumScale*pixel)-0.055));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x p o r t I m a g e P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExportImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, Quantum,
%  unsigned int, unsigned long long, float, or double in the order specified
%  by map.
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%      ExportImagePixels(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
%  The format of the ExportImagePixels method is:
%
%      MagickBooleanType ExportImagePixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height,
%        const char *map,const StorageType type,void *pixels,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o x,y,width,height: These values define the perimeter % of a region of pixels you want to extract. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o type: Define the data type of the pixels. Float and double types are % normalized to [0..1] otherwise [0..QuantumRange]. Choose from these % types: CharPixel (char *), DoublePixel (double *), FloatPixel (float *), % LongPixel (unsigned int *), LongLongPixel (unsigned long long *), % QuantumPixel (Quantum *), or ShortPixel (unsigned short *). % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ExportCharPixel(const Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; ssize_t x; unsigned char *magick_restrict q; size_t length; ssize_t y; q=(unsigned char *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToChar(GetPixelBlue(image,p)); *q++=ScaleQuantumToChar(GetPixelGreen(image,p)); *q++=ScaleQuantumToChar(GetPixelRed(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToChar(GetPixelBlue(image,p)); *q++=ScaleQuantumToChar(GetPixelGreen(image,p)); *q++=ScaleQuantumToChar(GetPixelRed(image,p)); *q++=ScaleQuantumToChar(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToChar(GetPixelBlue(image,p)); *q++=ScaleQuantumToChar(GetPixelGreen(image,p)); *q++=ScaleQuantumToChar(GetPixelRed(image,p)); *q++=ScaleQuantumToChar((Quantum) 0); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(image,p))); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToChar(GetPixelRed(image,p)); *q++=ScaleQuantumToChar(GetPixelGreen(image,p)); *q++=ScaleQuantumToChar(GetPixelBlue(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToChar(GetPixelRed(image,p)); *q++=ScaleQuantumToChar(GetPixelGreen(image,p)); *q++=ScaleQuantumToChar(GetPixelBlue(image,p)); *q++=ScaleQuantumToChar(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToChar(GetPixelRed(image,p)); *q++=ScaleQuantumToChar(GetPixelGreen(image,p)); *q++=ScaleQuantumToChar(GetPixelBlue(image,p)); *q++=ScaleQuantumToChar((Quantum) 0); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { *q=0; switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { *q=ScaleQuantumToChar(GetPixelRed(image,p)); break; } case GreenQuantum: case MagentaQuantum: { *q=ScaleQuantumToChar(GetPixelGreen(image,p)); break; } case BlueQuantum: case YellowQuantum: { *q=ScaleQuantumToChar(GetPixelBlue(image,p)); break; } case AlphaQuantum: { *q=ScaleQuantumToChar(GetPixelAlpha(image,p)); break; } case OpacityQuantum: { *q=ScaleQuantumToChar(GetPixelAlpha(image,p)); break; } case BlackQuantum: { if (image->colorspace == CMYKColorspace) *q=ScaleQuantumToChar(GetPixelBlack(image,p)); break; } case IndexQuantum: { *q=ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(image,p))); break; } default: break; } q++; } p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } static MagickBooleanType ExportDoublePixel(const Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; double *magick_restrict q; ssize_t x; size_t length; ssize_t y; q=(double *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(double) (QuantumScale*GetPixelBlue(image,p)); *q++=(double) (QuantumScale*GetPixelGreen(image,p)); *q++=(double) (QuantumScale*GetPixelRed(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(double) (QuantumScale*GetPixelBlue(image,p)); *q++=(double) (QuantumScale*GetPixelGreen(image,p)); *q++=(double) (QuantumScale*GetPixelRed(image,p)); *q++=(double) (QuantumScale*GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(double) (QuantumScale*GetPixelBlue(image,p)); *q++=(double) (QuantumScale*GetPixelGreen(image,p)); *q++=(double) (QuantumScale*GetPixelRed(image,p)); *q++=0.0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(double) (QuantumScale*GetPixelIntensity(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(double) (QuantumScale*GetPixelRed(image,p)); *q++=(double) (QuantumScale*GetPixelGreen(image,p)); *q++=(double) (QuantumScale*GetPixelBlue(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(double) (QuantumScale*GetPixelRed(image,p)); *q++=(double) (QuantumScale*GetPixelGreen(image,p)); *q++=(double) (QuantumScale*GetPixelBlue(image,p)); *q++=(double) (QuantumScale*GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(double) (QuantumScale*GetPixelRed(image,p)); *q++=(double) (QuantumScale*GetPixelGreen(image,p)); *q++=(double) (QuantumScale*GetPixelBlue(image,p)); *q++=0.0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { *q=0; switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { *q=(double) (QuantumScale*GetPixelRed(image,p)); break; } case GreenQuantum: case MagentaQuantum: { *q=(double) (QuantumScale*GetPixelGreen(image,p)); break; } case BlueQuantum: case YellowQuantum: { *q=(double) (QuantumScale*GetPixelBlue(image,p)); break; } case AlphaQuantum: { *q=(double) (QuantumScale*GetPixelAlpha(image,p)); break; } case OpacityQuantum: { *q=(double) (QuantumScale*GetPixelAlpha(image,p)); break; } case BlackQuantum: { if (image->colorspace == CMYKColorspace) *q=(double) (QuantumScale* GetPixelBlack(image,p)); break; } case IndexQuantum: { *q=(double) (QuantumScale*GetPixelIntensity(image,p)); break; } default: *q=0; } q++; } p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } static MagickBooleanType ExportFloatPixel(const Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; float *magick_restrict q; ssize_t x; size_t length; ssize_t y; q=(float *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(float) (QuantumScale*GetPixelBlue(image,p)); *q++=(float) (QuantumScale*GetPixelGreen(image,p)); *q++=(float) (QuantumScale*GetPixelRed(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(float) (QuantumScale*GetPixelBlue(image,p)); *q++=(float) (QuantumScale*GetPixelGreen(image,p)); *q++=(float) (QuantumScale*GetPixelRed(image,p)); *q++=(float) (QuantumScale*GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(float) (QuantumScale*GetPixelBlue(image,p)); *q++=(float) (QuantumScale*GetPixelGreen(image,p)); *q++=(float) (QuantumScale*GetPixelRed(image,p)); *q++=0.0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(float) (QuantumScale*GetPixelIntensity(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(float) (QuantumScale*GetPixelRed(image,p)); *q++=(float) (QuantumScale*GetPixelGreen(image,p)); *q++=(float) (QuantumScale*GetPixelBlue(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(float) (QuantumScale*GetPixelRed(image,p)); *q++=(float) (QuantumScale*GetPixelGreen(image,p)); *q++=(float) (QuantumScale*GetPixelBlue(image,p)); *q++=(float) (QuantumScale*GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=(float) (QuantumScale*GetPixelRed(image,p)); *q++=(float) (QuantumScale*GetPixelGreen(image,p)); *q++=(float) (QuantumScale*GetPixelBlue(image,p)); *q++=0.0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { *q=0; switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { *q=(float) (QuantumScale*GetPixelRed(image,p)); break; } case GreenQuantum: case MagentaQuantum: { *q=(float) (QuantumScale*GetPixelGreen(image,p)); break; } case BlueQuantum: case YellowQuantum: { *q=(float) (QuantumScale*GetPixelBlue(image,p)); break; } case AlphaQuantum: { *q=(float) (QuantumScale*((Quantum) (GetPixelAlpha(image,p)))); break; } case OpacityQuantum: { *q=(float) (QuantumScale*GetPixelAlpha(image,p)); break; } case BlackQuantum: { if (image->colorspace == CMYKColorspace) *q=(float) (QuantumScale* GetPixelBlack(image,p)); break; } case IndexQuantum: { *q=(float) (QuantumScale*GetPixelIntensity(image,p)); break; } default: *q=0; } q++; } p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } static MagickBooleanType ExportLongPixel(const Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; ssize_t x; unsigned int *magick_restrict q; size_t length; ssize_t y; q=(unsigned int *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLong(GetPixelRed(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLong(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLong(GetPixelRed(image,p)); *q++=0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLong(ClampToQuantum(GetPixelIntensity(image,p))); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLong(GetPixelBlue(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLong(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLong(GetPixelBlue(image,p)); *q++=0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { *q=0; switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { *q=ScaleQuantumToLong(GetPixelRed(image,p)); break; } case GreenQuantum: case MagentaQuantum: { *q=ScaleQuantumToLong(GetPixelGreen(image,p)); break; } case BlueQuantum: case YellowQuantum: { *q=ScaleQuantumToLong(GetPixelBlue(image,p)); break; } case AlphaQuantum: { *q=ScaleQuantumToLong(GetPixelAlpha(image,p)); break; } case OpacityQuantum: { *q=ScaleQuantumToLong(GetPixelAlpha(image,p)); break; } case BlackQuantum: { if (image->colorspace == CMYKColorspace) *q=ScaleQuantumToLong(GetPixelBlack(image,p)); break; } case IndexQuantum: { *q=ScaleQuantumToLong(ClampToQuantum(GetPixelIntensity(image,p))); break; } default: break; } 
q++; } p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } static MagickBooleanType ExportLongLongPixel(const Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; ssize_t x; MagickSizeType *magick_restrict q; size_t length; ssize_t y; q=(MagickSizeType *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLongLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLongLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLongLong(GetPixelRed(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLongLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLongLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLongLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLongLong(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLongLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLongLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLongLong(GetPixelRed(image,p)); *q++=0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLongLong(ClampToQuantum( GetPixelIntensity(image,p))); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLongLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLongLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLongLong(GetPixelBlue(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLongLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLongLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLongLong(GetPixelBlue(image,p)); *q++=ScaleQuantumToLongLong(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToLongLong(GetPixelRed(image,p)); *q++=ScaleQuantumToLongLong(GetPixelGreen(image,p)); *q++=ScaleQuantumToLongLong(GetPixelBlue(image,p)); *q++=0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { *q=0; switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { *q=ScaleQuantumToLongLong(GetPixelRed(image,p)); break; } case GreenQuantum: case MagentaQuantum: { *q=ScaleQuantumToLongLong(GetPixelGreen(image,p)); break; } case BlueQuantum: case YellowQuantum: { *q=ScaleQuantumToLongLong(GetPixelBlue(image,p)); break; } case AlphaQuantum: { *q=ScaleQuantumToLongLong(GetPixelAlpha(image,p)); break; } case OpacityQuantum: { *q=ScaleQuantumToLongLong(GetPixelAlpha(image,p)); break; } case BlackQuantum: { if (image->colorspace == CMYKColorspace) *q=ScaleQuantumToLongLong(GetPixelBlack(image,p)); break; } case IndexQuantum: { *q=ScaleQuantumToLongLong(ClampToQuantum( GetPixelIntensity(image,p))); break; } default: break; } q++; } p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); }
/*
  ExportQuantumPixel() copies the pixels of the region 'roi' of 'image' into
  the caller-supplied buffer 'pixels' as raw Quantum values, one value per
  character of 'map' (channel order given by 'map').  Returns MagickTrue only
  if every requested row could be read from the pixel cache.
*/
static MagickBooleanType ExportQuantumPixel(const Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; size_t length; ssize_t y; q=(Quantum *) pixels;
/* Unrolled fast paths for the common channel orderings. */
if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=GetPixelBlue(image,p); *q++=GetPixelGreen(image,p); *q++=GetPixelRed(image,p); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=GetPixelBlue(image,p); *q++=GetPixelGreen(image,p); *q++=GetPixelRed(image,p); *q++=(Quantum) (GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=GetPixelBlue(image,p); *q++=GetPixelGreen(image,p); *q++=GetPixelRed(image,p); *q++=(Quantum) 0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ClampToQuantum(GetPixelIntensity(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=GetPixelRed(image,p); *q++=GetPixelGreen(image,p); *q++=GetPixelBlue(image,p); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=GetPixelRed(image,p); *q++=GetPixelGreen(image,p); *q++=GetPixelBlue(image,p); *q++=(Quantum) (GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=GetPixelRed(image,p); *q++=GetPixelGreen(image,p); *q++=GetPixelBlue(image,p); *q++=(Quantum) 0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); }
/* Generic path: emit one Quantum per 'map' character via quantum_map[].
   NOTE(review): OpacityQuantum is exported as straight alpha here (no
   inversion) -- consistent with the other Export* variants; confirm intended. */
length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { *q=(Quantum) 0; switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { *q=GetPixelRed(image,p); break; } case GreenQuantum: case MagentaQuantum: { *q=GetPixelGreen(image,p); break; } case BlueQuantum: case YellowQuantum: { *q=GetPixelBlue(image,p); break; } case AlphaQuantum: { *q=GetPixelAlpha(image,p); break; } case OpacityQuantum: { *q=GetPixelAlpha(image,p); break; } case BlackQuantum: { if (image->colorspace == CMYKColorspace) *q=GetPixelBlack(image,p); break; } case IndexQuantum: { *q=ClampToQuantum(GetPixelIntensity(image,p)); break; } default: { *q=(Quantum) 0; break; } } q++; } p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); }
/*
  ExportShortPixel() copies the pixels of the region 'roi' of 'image' into
  the caller-supplied buffer 'pixels' as unsigned shorts (each Quantum scaled
  with ScaleQuantumToShort()), one value per character of 'map'.  Returns
  MagickTrue only if every requested row could be read from the pixel cache.
*/
static MagickBooleanType ExportShortPixel(const Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; ssize_t x; unsigned short *magick_restrict q; size_t length; ssize_t y; q=(unsigned short *) pixels;
/* Unrolled fast paths for the common channel orderings. */
if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToShort(GetPixelBlue(image,p)); *q++=ScaleQuantumToShort(GetPixelGreen(image,p)); *q++=ScaleQuantumToShort(GetPixelRed(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToShort(GetPixelBlue(image,p)); *q++=ScaleQuantumToShort(GetPixelGreen(image,p)); *q++=ScaleQuantumToShort(GetPixelRed(image,p)); *q++=ScaleQuantumToShort(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToShort(GetPixelBlue(image,p)); *q++=ScaleQuantumToShort(GetPixelGreen(image,p)); *q++=ScaleQuantumToShort(GetPixelRed(image,p)); *q++=0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToShort(ClampToQuantum(GetPixelIntensity(image,p))); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToShort(GetPixelRed(image,p)); *q++=ScaleQuantumToShort(GetPixelGreen(image,p)); *q++=ScaleQuantumToShort(GetPixelBlue(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToShort(GetPixelRed(image,p)); *q++=ScaleQuantumToShort(GetPixelGreen(image,p)); *q++=ScaleQuantumToShort(GetPixelBlue(image,p)); *q++=ScaleQuantumToShort(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { *q++=ScaleQuantumToShort(GetPixelRed(image,p)); *q++=ScaleQuantumToShort(GetPixelGreen(image,p)); *q++=ScaleQuantumToShort(GetPixelBlue(image,p)); *q++=0; p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); }
/* Generic path: emit one short per 'map' character via quantum_map[]. */
length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { p=GetVirtualPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { *q=0; switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { *q=ScaleQuantumToShort(GetPixelRed(image,p)); break; } case GreenQuantum: case MagentaQuantum: { *q=ScaleQuantumToShort(GetPixelGreen(image,p)); break; } case BlueQuantum: case YellowQuantum: { *q=ScaleQuantumToShort(GetPixelBlue(image,p)); break; } case AlphaQuantum: { *q=ScaleQuantumToShort(GetPixelAlpha(image,p)); break; } case OpacityQuantum: { *q=ScaleQuantumToShort(GetPixelAlpha(image,p)); break; } case BlackQuantum: { if (image->colorspace == CMYKColorspace) *q=ScaleQuantumToShort(GetPixelBlack(image,p)); break; } case IndexQuantum: { *q=ScaleQuantumToShort(ClampToQuantum(GetPixelIntensity(image,p))); break; } default: break; } q++; } p+=GetPixelChannels(image); } } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); }
/*
  ExportImagePixels() extracts pixel data from the region (x,y,width,height)
  of 'image' into the preallocated buffer 'pixels', in the channel order given
  by 'map' (R,G,B,A,O,C,M,Y,K,I,P) and the storage type given by 'type'.
  Returns MagickTrue on success, MagickFalse on error (exception is updated).
*/
MagickExport MagickBooleanType ExportImagePixels(const Image *image, const ssize_t x,const ssize_t y,const size_t width,const size_t height, const char *map,const StorageType type,void *pixels,ExceptionInfo *exception) { MagickBooleanType status; QuantumType *quantum_map; RectangleInfo roi; ssize_t i; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); length=strlen(map);
/* Translate each character of 'map' into a QuantumType; the C/M/Y/K
   selectors are only valid for CMYK (color-separated) images. */
quantum_map=(QuantumType *) AcquireQuantumMemory(length,sizeof(*quantum_map)); if (quantum_map == (QuantumType *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } for (i=0; i < (ssize_t) length; i++) { switch (map[i]) { case 'A': case 'a': { quantum_map[i]=AlphaQuantum; break; } case 'B': case 'b': { quantum_map[i]=BlueQuantum; break; } case 'C': case 'c': { quantum_map[i]=CyanQuantum; if (image->colorspace == CMYKColorspace) break; quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",map); return(MagickFalse); } case 'g': case 'G': { quantum_map[i]=GreenQuantum; break; } case 'I': case 'i': { quantum_map[i]=IndexQuantum; break; } case 'K': case 'k': { quantum_map[i]=BlackQuantum; if (image->colorspace == CMYKColorspace) break; quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",map); return(MagickFalse); } case 'M': case 'm': { quantum_map[i]=MagentaQuantum; if (image->colorspace == CMYKColorspace) break; quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",map); 
return(MagickFalse); } case 'o': case 'O': { quantum_map[i]=OpacityQuantum; break; } case 'P': case 'p': { quantum_map[i]=UndefinedQuantum; break; } case 'R': case 'r': { quantum_map[i]=RedQuantum; break; } case 'Y': case 'y': { quantum_map[i]=YellowQuantum; if (image->colorspace == CMYKColorspace) break; quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",map); return(MagickFalse); } default: { quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnrecognizedPixelMap","`%s'",map); return(MagickFalse); } } }
/* Dispatch to the per-storage-type exporter. */
roi.width=width; roi.height=height; roi.x=x; roi.y=y; switch (type) { case CharPixel: { status=ExportCharPixel(image,&roi,map,quantum_map,pixels,exception); break; } case DoublePixel: { status=ExportDoublePixel(image,&roi,map,quantum_map,pixels,exception); break; } case FloatPixel: { status=ExportFloatPixel(image,&roi,map,quantum_map,pixels,exception); break; } case LongPixel: { status=ExportLongPixel(image,&roi,map,quantum_map,pixels,exception); break; } case LongLongPixel: { status=ExportLongLongPixel(image,&roi,map,quantum_map,pixels,exception); break; } case QuantumPixel: { status=ExportQuantumPixel(image,&roi,map,quantum_map,pixels,exception); break; } case ShortPixel: { status=ExportShortPixel(image,&roi,map,quantum_map,pixels,exception); break; } default: { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnrecognizedPixelMap","`%s'",map); status=MagickFalse; } } quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelInfo() initializes the PixelInfo structure. 
% % The format of the GetPixelInfo method is: % % GetPixelInfo(const Image *image,PixelInfo *pixel) % % A description of each parameter follows: % % o image: the image. (optional - may be NULL) % % o pixel: Specifies a pointer to a PixelInfo structure. % */ MagickExport void GetPixelInfo(const Image *image,PixelInfo *pixel) {
/* Start from zeroed defaults (sRGB, DirectClass, opaque, build depth)... */
(void) memset(pixel,0,sizeof(*pixel)); pixel->storage_class=DirectClass; pixel->colorspace=sRGBColorspace; pixel->depth=MAGICKCORE_QUANTUM_DEPTH; pixel->alpha_trait=UndefinedPixelTrait; pixel->alpha=(double) OpaqueAlpha; if (image == (const Image *) NULL) return;
/* ...then, when an image is supplied, inherit its pixel attributes. */
pixel->storage_class=image->storage_class; pixel->colorspace=image->colorspace; pixel->alpha_trait=image->alpha_trait; pixel->depth=image->depth; pixel->fuzz=image->fuzz; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l I n f o I n t e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelInfoIntensity() returns a single sample intensity value from the red, % green, and blue components of a pixel based on the selected method: % % Rec601Luma 0.298839R' + 0.586811G' + 0.114350B' % Rec601Luminance 0.298839R + 0.586811G + 0.114350B % Rec709Luma 0.212656R' + 0.715158G' + 0.072186B' % Rec709Luminance 0.212656R + 0.715158G + 0.072186B % Brightness max(R', G', B') % Lightness (min(R', G', B') + max(R', G', B')) / 2.0 % % MS (R^2 + G^2 + B^2) / 3.0 % RMS sqrt((R^2 + G^2 + B^2) / 3.0) % Average (R + G + B) / 3.0 % % The format of the GetPixelInfoIntensity method is: % % MagickRealType GetPixelInfoIntensity(const Image *image, % const PixelInfo *pixel) % % A description of each parameter follows: % % o image: the image. % % o pixel: Specifies a pointer to a PixelInfo structure. 
% */ MagickExport MagickRealType GetPixelInfoIntensity( const Image *magick_restrict image,const PixelInfo *magick_restrict pixel) { MagickRealType blue, green, red, intensity; PixelIntensityMethod method;
/* Default to Rec709Luma when no image supplies an intensity method. */
method=Rec709LumaPixelIntensityMethod; if (image != (const Image *) NULL) method=image->intensity; red=pixel->red; green=pixel->green; blue=pixel->blue; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+blue*blue)/ (3.0*QuantumRange)); break; }
/* Luma variants expect gamma-encoded channels, luminance variants expect
   linear channels; encode/decode first when the pixel's colorspace says
   it is in the other domain. */
case Rec601LumaPixelIntensityMethod: { if (pixel->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (pixel->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (pixel->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (pixel->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+blue*blue)/ sqrt(3.0)); break; } } return(intensity); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l I n t e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelIntensity() returns a single sample intensity value from the red, % green, and blue components of a pixel based on the selected method: % % Rec601Luma 0.298839R' + 0.586811G' + 0.114350B' % Rec601Luminance 0.298839R + 0.586811G + 0.114350B % Rec709Luma 0.212656R' + 0.715158G' + 0.072186B' % Rec709Luminance 0.212656R + 0.715158G + 0.072186B % Brightness max(R', G', B') % Lightness (min(R', G', B') + max(R', G', B')) / 2.0 % % MS (R^2 + G^2 + B^2) / 3.0 % RMS sqrt((R^2 + G^2 + B^2) / 3.0) % Average (R + G + B) / 3.0 % % The format of the GetPixelIntensity method is: % % MagickRealType GetPixelIntensity(const Image *image, % const Quantum *pixel) % % A description of each parameter follows: % % o image: the image. % % o pixel: Specifies a pointer to a Quantum structure. 
% */ MagickExport MagickRealType GetPixelIntensity( const Image *magick_restrict image,const Quantum *magick_restrict pixel) { MagickRealType blue, green, red, intensity; red=(MagickRealType) GetPixelRed(image,pixel);
/* Single-channel (grayscale) images: the red channel is the intensity. */
if (image->number_channels == 1) return(red); green=(MagickRealType) GetPixelGreen(image,pixel); blue=(MagickRealType) GetPixelBlue(image,pixel); switch (image->intensity) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+blue*blue)/ (3.0*QuantumRange)); break; }
/* Luma variants expect gamma-encoded channels, luminance variants expect
   linear channels; encode/decode first when the image's colorspace is in
   the other domain. */
case Rec601LumaPixelIntensityMethod: { if ((image->colorspace == RGBColorspace) || (image->colorspace == LinearGRAYColorspace)) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if ((image->colorspace == sRGBColorspace) || (image->colorspace == GRAYColorspace)) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if ((image->colorspace == RGBColorspace) || (image->colorspace == LinearGRAYColorspace)) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if ((image->colorspace == sRGBColorspace) || (image->colorspace == GRAYColorspace)) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { 
intensity=(MagickRealType) (sqrt((double) red*red+green*green+blue*blue)/ sqrt(3.0)); break; } } return(intensity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p o r t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImportImagePixels() accepts pixel data and stores in the image at the % location you specify. The method returns MagickTrue on success otherwise % MagickFalse if an error is encountered. The pixel data can be either char, % Quantum, short int, unsigned int, unsigned long long, float, or double in % the order specified by map. % % Suppose your want to upload the first scanline of a 640x480 image from % character data in red-green-blue order: % % ImportImagePixels(image,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the ImportImagePixels method is: % % MagickBooleanType ImportImagePixels(Image *image,const ssize_t x, % const ssize_t y,const size_t width,const size_t height, % const char *map,const StorageType type,const void *pixels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,width,height: These values define the perimeter % of a region of pixels you want to define. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o type: Define the data type of the pixels. Float and double types are % normalized to [0..1] otherwise [0..QuantumRange]. Choose from these % types: CharPixel (char *), DoublePixel (double *), FloatPixel (float *), % LongPixel (unsigned int *), LongLongPixel (unsigned long long *), % QuantumPixel (Quantum *), or ShortPixel (unsigned short *). 
% % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % % o exception: return any errors or warnings in this structure. % */
/*
  ImportCharPixel() stores unsigned-char pixel data from 'pixels' into the
  region 'roi' of 'image', channel-ordered per 'map', scaling each byte with
  ScaleCharToQuantum().  Returns MagickTrue only if every row was written and
  synced to the pixel cache.
*/
static MagickBooleanType ImportCharPixel(Image *image,const RectangleInfo *roi, const char *magick_restrict map,const QuantumType *quantum_map, const void *pixels,ExceptionInfo *exception) { const unsigned char *magick_restrict p; Quantum *magick_restrict q; ssize_t x; size_t length; ssize_t y; p=(const unsigned char *) pixels;
/* Unrolled fast paths for the common channel orderings. */
if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelRed(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelAlpha(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); }
/* NOTE(review): the BGRO/RGBO paths store the O byte via SetPixelAlpha
   without inversion, identical to the BGRA/RGBA paths -- confirm intended. */
if (LocaleCompare(map,"BGRO") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelAlpha(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelRed(image,ScaleCharToQuantum(*p++),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelGray(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelBlue(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelBlue(image,ScaleCharToQuantum(*p++),q); SetPixelAlpha(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBO") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelBlue(image,ScaleCharToQuantum(*p++),q); SetPixelAlpha(image,ScaleCharToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleCharToQuantum(*p++),q); SetPixelGreen(image,ScaleCharToQuantum(*p++),q); SetPixelBlue(image,ScaleCharToQuantum(*p++),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); }
/* Generic path: consume one byte per 'map' character via quantum_map[]. */
length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { SetPixelRed(image,ScaleCharToQuantum(*p),q); break; } case GreenQuantum: case MagentaQuantum: { SetPixelGreen(image,ScaleCharToQuantum(*p),q); break; } case BlueQuantum: case YellowQuantum: { SetPixelBlue(image,ScaleCharToQuantum(*p),q); break; } case AlphaQuantum: { SetPixelAlpha(image,ScaleCharToQuantum(*p),q); break; } case OpacityQuantum: { SetPixelAlpha(image,ScaleCharToQuantum(*p),q); break; } case BlackQuantum: { SetPixelBlack(image,ScaleCharToQuantum(*p),q); break; } case IndexQuantum: { SetPixelGray(image,ScaleCharToQuantum(*p),q); break; } default: break; } p++; } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); }
/*
  ImportDoublePixel() stores double-precision pixel data from 'pixels' into
  the region 'roi' of 'image', channel-ordered per 'map'.  Each sample is
  expected normalized to [0..1] (per the ImportImagePixels contract above):
  it is multiplied by QuantumRange and clamped with ClampToQuantum().
  Returns MagickTrue only if every row was written and synced to the cache.
*/
static MagickBooleanType ImportDoublePixel(Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception) { const double *magick_restrict p; Quantum *magick_restrict q; ssize_t x; size_t length; ssize_t y; p=(const double *) pixels;
/* Unrolled fast paths for the common channel orderings. */
if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q); p++; p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); }
/* NOTE(review): unlike the char variant, this RGBP path consumes only three
   doubles per pixel (no pad skip) -- confirm against the other Import*
   variants. */
if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q); p++; SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); }
/* Generic path: consume one double per 'map' character via quantum_map[]. */
length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q); break; } case GreenQuantum: case MagentaQuantum: { SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q); break; } case BlueQuantum: case YellowQuantum: { SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q); break; } case AlphaQuantum: { SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q); break; } case OpacityQuantum: { SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q); break; } case BlackQuantum: { SetPixelBlack(image,ClampToQuantum(QuantumRange*(*p)),q); break; } case IndexQuantum: { SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q); break; } default: break; } p++; } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue);
}

static MagickBooleanType ImportFloatPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  /*
    Import single-precision float pixels into the region of interest.  Each
    float channel value is expected in the normalized range [0.0,1.0]; it is
    scaled by QuantumRange and clamped into a Quantum.  Fast paths handle the
    common channel maps (BGR, BGRA, BGRP, I, RGB, RGBA, RGBP); any other map
    falls through to the generic quantum_map-driven loop at the bottom.
    Returns MagickTrue only if every row was read and synced successfully.
  */
  const float
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const float *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      /* y < height means a row broke out early: report failure. */
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"BGRP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          p++;  /* 'P' is a pad channel: consume and discard it */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"I") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGB") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBA") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
          p++;
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: interpret the caller's map one channel letter at a time via
    the precomputed quantum_map.  NOTE(review): OpacityQuantum is stored with
    SetPixelAlpha without inversion, i.e. treated identically to AlphaQuantum
    here — confirm against the exporter's convention.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case OpacityQuantum:
          {
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ClampToQuantum(QuantumRange*(*p)),q);
            break;
          }
          default:
            break;  /* unrecognized letters (e.g. pad) are skipped */
        }
        p++;  /* one source float consumed per map letter, recognized or not */
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}

static MagickBooleanType ImportLongPixel(Image *image,const RectangleInfo *roi,
  const char *magick_restrict map,const QuantumType *quantum_map,
  const void *pixels,ExceptionInfo *exception)
{
  /*
    Import 32-bit unsigned ('long' storage) pixels; values are rescaled to
    Quantum with ScaleLongToQuantum.  Structure mirrors ImportFloatPixel.
  */
  const unsigned int
    *magick_restrict p;

  Quantum
    *magick_restrict q;

  ssize_t
    x;

  size_t
    length;

  ssize_t
    y;

  p=(const unsigned int *) pixels;
  if (LocaleCompare(map,"BGR") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelBlue(image,ScaleLongToQuantum(*p++),q);
          SetPixelGreen(image,ScaleLongToQuantum(*p++),q);
          SetPixelRed(image,ScaleLongToQuantum(*p++),q);
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ?
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongToQuantum(*p++),q); SetPixelRed(image,ScaleLongToQuantum(*p++),q); SetPixelAlpha(image,ScaleLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongToQuantum(*p++),q); SetPixelRed(image,ScaleLongToQuantum(*p++),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelGray(image,ScaleLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongToQuantum(*p++),q); SetPixelBlue(image,ScaleLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongToQuantum(*p++),q); SetPixelBlue(image,ScaleLongToQuantum(*p++),q); SetPixelAlpha(image,ScaleLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongToQuantum(*p++),q); SetPixelBlue(image,ScaleLongToQuantum(*p++),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { SetPixelRed(image,ScaleLongToQuantum(*p),q); break; } case GreenQuantum: case MagentaQuantum: { SetPixelGreen(image,ScaleLongToQuantum(*p),q); break; } case BlueQuantum: case YellowQuantum: { SetPixelBlue(image,ScaleLongToQuantum(*p),q); break; } case AlphaQuantum: { SetPixelAlpha(image,ScaleLongToQuantum(*p),q); break; } case OpacityQuantum: { SetPixelAlpha(image,ScaleLongToQuantum(*p),q); break; } case BlackQuantum: { SetPixelBlack(image,ScaleLongToQuantum(*p),q); break; } case IndexQuantum: { SetPixelGray(image,ScaleLongToQuantum(*p),q); break; } default: break; } p++; } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } static MagickBooleanType ImportLongLongPixel(Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception) { const MagickSizeType *magick_restrict p; Quantum *magick_restrict q; ssize_t x; size_t length; ssize_t y; p=(const MagickSizeType *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q); SetPixelRed(image,ScaleLongLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q); SetPixelRed(image,ScaleLongLongToQuantum(*p++),q); SetPixelAlpha(image,ScaleLongLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q); SetPixelRed(image,ScaleLongLongToQuantum(*p++),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelGray(image,ScaleLongLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleLongLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q); SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleLongLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q); SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q); SetPixelAlpha(image,ScaleLongLongToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleLongLongToQuantum(*p++),q); SetPixelGreen(image,ScaleLongLongToQuantum(*p++),q); SetPixelBlue(image,ScaleLongLongToQuantum(*p++),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { SetPixelRed(image,ScaleLongLongToQuantum(*p),q); break; } case GreenQuantum: case MagentaQuantum: { SetPixelGreen(image,ScaleLongLongToQuantum(*p),q); break; } case BlueQuantum: case YellowQuantum: { SetPixelBlue(image,ScaleLongLongToQuantum(*p),q); break; } case AlphaQuantum: { SetPixelAlpha(image,ScaleLongLongToQuantum(*p),q); break; } case OpacityQuantum: { SetPixelAlpha(image,ScaleLongLongToQuantum(*p),q); break; } case BlackQuantum: { SetPixelBlack(image,ScaleLongLongToQuantum(*p),q); break; } case IndexQuantum: { SetPixelGray(image,ScaleLongLongToQuantum(*p),q); break; } default: break; } p++; } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } static MagickBooleanType ImportQuantumPixel(Image *image, const RectangleInfo *roi,const char *magick_restrict map, const QuantumType *quantum_map,const void *pixels,ExceptionInfo *exception) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; size_t length; ssize_t y; p=(const Quantum *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,*p++,q); SetPixelGreen(image,*p++,q); SetPixelRed(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,*p++,q); SetPixelGreen(image,*p++,q); SetPixelRed(image,*p++,q); SetPixelAlpha(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,*p++,q); SetPixelGreen(image,*p++,q); SetPixelRed(image,*p++,q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelGray(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,*p++,q); SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,*p++,q); SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); SetPixelAlpha(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,*p++,q); SetPixelGreen(image,*p++,q); SetPixelBlue(image,*p++,q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } length=strlen(map); for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { ssize_t i; for (i=0; i < (ssize_t) length; i++) { switch (quantum_map[i]) { case RedQuantum: case CyanQuantum: { SetPixelRed(image,*p,q); break; } case GreenQuantum: case MagentaQuantum: { SetPixelGreen(image,*p,q); break; } case BlueQuantum: case YellowQuantum: { SetPixelBlue(image,*p,q); break; } case AlphaQuantum: { SetPixelAlpha(image,*p,q); break; } case OpacityQuantum: { SetPixelAlpha(image,*p,q); break; } case BlackQuantum: { SetPixelBlack(image,*p,q); break; } case IndexQuantum: { SetPixelGray(image,*p,q); break; } default: break; } p++; } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } static MagickBooleanType ImportShortPixel(Image *image,const RectangleInfo *roi, const char *magick_restrict map,const QuantumType *quantum_map, const void *pixels,ExceptionInfo *exception) { const unsigned short *magick_restrict p; Quantum *magick_restrict q; ssize_t x; size_t length; ssize_t y; p=(const unsigned short *) pixels; if (LocaleCompare(map,"BGR") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleShortToQuantum(*p++),q); SetPixelGreen(image,ScaleShortToQuantum(*p++),q); SetPixelRed(image,ScaleShortToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleShortToQuantum(*p++),q); SetPixelGreen(image,ScaleShortToQuantum(*p++),q); SetPixelRed(image,ScaleShortToQuantum(*p++),q); SetPixelAlpha(image,ScaleShortToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"BGRP") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelBlue(image,ScaleShortToQuantum(*p++),q); SetPixelGreen(image,ScaleShortToQuantum(*p++),q); SetPixelRed(image,ScaleShortToQuantum(*p++),q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue); } if (LocaleCompare(map,"I") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelGray(image,ScaleShortToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGB") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleShortToQuantum(*p++),q); SetPixelGreen(image,ScaleShortToQuantum(*p++),q); SetPixelBlue(image,ScaleShortToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue); } if (LocaleCompare(map,"RGBA") == 0) { for (y=0; y < (ssize_t) roi->height; y++) { q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) roi->width; x++) { SetPixelRed(image,ScaleShortToQuantum(*p++),q); SetPixelGreen(image,ScaleShortToQuantum(*p++),q); SetPixelBlue(image,ScaleShortToQuantum(*p++),q); SetPixelAlpha(image,ScaleShortToQuantum(*p++),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } return(y < (ssize_t) roi->height ? 
MagickFalse : MagickTrue);
    }
  if (LocaleCompare(map,"RGBP") == 0)
    {
      for (y=0; y < (ssize_t) roi->height; y++)
      {
        q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
        if (q == (Quantum *) NULL)
          break;
        for (x=0; x < (ssize_t) roi->width; x++)
        {
          SetPixelRed(image,ScaleShortToQuantum(*p++),q);
          SetPixelGreen(image,ScaleShortToQuantum(*p++),q);
          SetPixelBlue(image,ScaleShortToQuantum(*p++),q);
          p++;  /* 'P' is a pad channel: consume and discard it */
          q+=GetPixelChannels(image);
        }
        if (SyncAuthenticPixels(image,exception) == MagickFalse)
          break;
      }
      return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
    }
  /*
    Generic path: one source short per map letter, routed by quantum_map.
  */
  length=strlen(map);
  for (y=0; y < (ssize_t) roi->height; y++)
  {
    q=GetAuthenticPixels(image,roi->x,roi->y+y,roi->width,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) roi->width; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) length; i++)
      {
        switch (quantum_map[i])
        {
          case RedQuantum:
          case CyanQuantum:
          {
            SetPixelRed(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case GreenQuantum:
          case MagentaQuantum:
          {
            SetPixelGreen(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case BlueQuantum:
          case YellowQuantum:
          {
            SetPixelBlue(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case AlphaQuantum:
          {
            SetPixelAlpha(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case OpacityQuantum:
          {
            SetPixelAlpha(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case BlackQuantum:
          {
            SetPixelBlack(image,ScaleShortToQuantum(*p),q);
            break;
          }
          case IndexQuantum:
          {
            SetPixelGray(image,ScaleShortToQuantum(*p),q);
            break;
          }
          default:
            break;
        }
        p++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  return(y < (ssize_t) roi->height ? MagickFalse : MagickTrue);
}

/*
  ImportImagePixels() accepts pixel data of the given StorageType and channel
  map (e.g. "RGB", "BGRA") and transfers it into the rectangle
  (x,y,width,height) of the image, dispatching to the per-type importer.
  Side effects: may switch the image colorspace (CMYK for c/m/y/k letters,
  gray for i), enables the alpha trait for a/o letters, and forces
  DirectClass storage.  Returns MagickFalse on an unrecognized map letter,
  unrecognized storage type, allocation failure, or importer failure.
*/
MagickExport MagickBooleanType ImportImagePixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height,const char *map,
  const StorageType type,const void *pixels,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  QuantumType
    *quantum_map;

  RectangleInfo
    roi;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Translate each map letter to a QuantumType up front so the per-type
    importers only switch on enum values in their inner loops.
  */
  length=strlen(map);
  quantum_map=(QuantumType *) AcquireQuantumMemory(length,sizeof(*quantum_map));
  if (quantum_map == (QuantumType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      {
        quantum_map[i]=AlphaQuantum;
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'B':
      case 'b':
      {
        quantum_map[i]=BlueQuantum;
        break;
      }
      case 'C':
      case 'c':
      {
        quantum_map[i]=CyanQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'g':
      case 'G':
      {
        quantum_map[i]=GreenQuantum;
        break;
      }
      case 'K':
      case 'k':
      {
        quantum_map[i]=BlackQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'I':
      case 'i':
      {
        quantum_map[i]=IndexQuantum;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
        break;
      }
      case 'm':
      case 'M':
      {
        quantum_map[i]=MagentaQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      case 'O':
      case 'o':
      {
        quantum_map[i]=OpacityQuantum;
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'P':
      case 'p':
      {
        /* pad channel: source value is consumed but not stored */
        quantum_map[i]=UndefinedQuantum;
        break;
      }
      case 'R':
      case 'r':
      {
        quantum_map[i]=RedQuantum;
        break;
      }
      case 'Y':
      case 'y':
      {
        quantum_map[i]=YellowQuantum;
        (void) SetImageColorspace(image,CMYKColorspace,exception);
        break;
      }
      default:
      {
        quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedPixelMap","`%s'",map);
        return(MagickFalse);
      }
    }
  }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Transfer the pixels from the pixel data to the image.
  */
  roi.width=width;
  roi.height=height;
  roi.x=x;
  roi.y=y;
  switch (type)
  {
    case CharPixel:
    {
      status=ImportCharPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case DoublePixel:
    {
      status=ImportDoublePixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case FloatPixel:
    {
      status=ImportFloatPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case LongPixel:
    {
      status=ImportLongPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case LongLongPixel:
    {
      status=ImportLongLongPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case QuantumPixel:
    {
      status=ImportQuantumPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    case ShortPixel:
    {
      status=ImportShortPixel(image,&roi,map,quantum_map,pixels,exception);
      break;
    }
    default:
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnrecognizedStorageType","`%d'",type);
      status=MagickFalse;
    }
  }
  quantum_map=(QuantumType *) RelinquishMagickMemory(quantum_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e P i x e l C h a n n e l M a p                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializePixelChannelMap() defines the standard pixel component map.
%
%  The format of the InitializePixelChannelMap() method is:
%
%      void InitializePixelChannelMap(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void InitializePixelChannelMap(Image *image)
{
  PixelTrait
    trait;

  ssize_t
    i;

  ssize_t
    n;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) memset(image->channel_map,0,MaxPixelChannels*
    sizeof(*image->channel_map));
  /*
    Color channels get the update trait, plus the blend trait when the image
    has an alpha channel.
  */
  trait=UpdatePixelTrait;
  if (image->alpha_trait != UndefinedPixelTrait)
    trait=(PixelTrait) (trait | BlendPixelTrait);
  n=0;
  if ((image->colorspace == LinearGRAYColorspace) ||
      (image->colorspace == GRAYColorspace))
    {
      /*
        Grayscale: red, green and blue all alias the same channel offset
        (n is only incremented once, after the last assignment).
      */
      SetPixelChannelAttributes(image,BluePixelChannel,trait,n);
      SetPixelChannelAttributes(image,GreenPixelChannel,trait,n);
      SetPixelChannelAttributes(image,RedPixelChannel,trait,n++);
    }
  else
    {
      SetPixelChannelAttributes(image,RedPixelChannel,trait,n++);
      SetPixelChannelAttributes(image,GreenPixelChannel,trait,n++);
      SetPixelChannelAttributes(image,BluePixelChannel,trait,n++);
    }
  if (image->colorspace == CMYKColorspace)
    SetPixelChannelAttributes(image,BlackPixelChannel,trait,n++);
  /*
    Meta channels follow the color channels; each is identified by its
    running channel index.
  */
  for (i=0; i < (ssize_t) image->number_meta_channels; i++)
  {
    SetPixelChannelAttributes(image,(PixelChannel) n,UpdatePixelTrait,n);
    n++;
  }
  /*
    Alpha, index, and mask channels are copy-only (not updated by image
    operators by default).
  */
  if (image->alpha_trait != UndefinedPixelTrait)
    SetPixelChannelAttributes(image,AlphaPixelChannel,CopyPixelTrait,n++);
  if (image->storage_class == PseudoClass)
    SetPixelChannelAttributes(image,IndexPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & ReadMaskChannel) != 0)
    SetPixelChannelAttributes(image,ReadMaskPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & WriteMaskChannel) != 0)
    SetPixelChannelAttributes(image,WriteMaskPixelChannel,CopyPixelTrait,n++);
  if ((image->channels & CompositeMaskChannel) != 0)
    SetPixelChannelAttributes(image,CompositeMaskPixelChannel,CopyPixelTrait,
      n++);
  image->number_channels=(size_t) n;
  (void) SetPixelChannelMask(image,image->channel_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t e P i x e l C h a n n e l                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolatePixelChannel() applies a pixel interpolation method between a
%  floating point coordinate and the pixels surrounding that coordinate.  No
%  pixel area resampling, or scaling of the result is performed.
%
%  Interpolation is restricted to just the specified channel.
%
%  The format of the InterpolatePixelChannel method is:
%
%      MagickBooleanType InterpolatePixelChannel(
%        const Image *magick_restrict image,const CacheView *image_view,
%        const PixelChannel channel,const PixelInterpolateMethod method,
%        const double x,const double y,double *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o image_view: the image view.
%
%    o channel: the pixel channel to interpolate.
%
%    o method: the pixel color interpolation method.
%
%    o x,y: A double representing the current (x,y) position of the pixel.
%
%    o pixel: return the interpolated pixel here.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Compute the four 1D Catmull-Rom weights for sampling position x, where x is
  the offset from the second of four input pixel locations (0 <= x <= 1).
*/
static inline void CatromWeights(const double x,double (*weights)[4])
{
  double
    alpha,
    beta,
    gamma;

  /*
    Nicolas Robidoux' 10 flops (4* + 5- + 1+) refactoring of the computation
    of the standard four 1D Catmull-Rom weights.  The sampling location is
    assumed between the second and third input pixel locations, and x is the
    position relative to the second input pixel location.  Formulas originally
    derived for the VIPS (Virtual Image Processing System) library.
  */
  alpha=(double) 1.0-x;
  beta=(double) (-0.5)*x*alpha;
  (*weights)[0]=alpha*beta;
  (*weights)[3]=x*beta;
  /*
    The following computation of the inner weights from the outer ones work
    for all Keys cubics.
  */
  gamma=(*weights)[3]-(*weights)[0];
  (*weights)[1]=alpha-(*weights)[0]+gamma;
  (*weights)[2]=x-(*weights)[3]-gamma;
}

/*
  Compute the four 1D cubic B-spline smoothing weights for sampling position
  x (same parameterization as CatromWeights).
*/
static inline void SplineWeights(const double x,double (*weights)[4])
{
  double
    alpha,
    beta;

  /*
    Nicolas Robidoux' 12 flops (6* + 5- + 1+) refactoring of the computation
    of the standard four 1D cubic B-spline smoothing weights.  The sampling
    location is assumed between the second and third input pixel locations,
    and x is the position relative to the second input pixel location.
  */
  alpha=(double) 1.0-x;
  (*weights)[3]=(double) (1.0/6.0)*x*x*x;
  (*weights)[0]=(double) (1.0/6.0)*alpha*alpha*alpha;
  beta=(*weights)[3]-(*weights)[0];
  (*weights)[1]=alpha-(*weights)[0]+beta;
  (*weights)[2]=x-(*weights)[3]-beta;
}

/*
  Barycentric blend of value p with the x and y samples, weighted by the
  fractional offsets in delta (mesh interpolation helper).
*/
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  return(delta->x*x+delta->y*y+(1.0-delta->x-delta->y)*p);
}

MagickExport MagickBooleanType InterpolatePixelChannel(
  const Image *magick_restrict image,const CacheView_ *image_view,
  const PixelChannel channel,const PixelInterpolateMethod method,
  const double x,const double y,double *pixel,ExceptionInfo *exception)
{
  double
    alpha[16],
    gamma,
    pixels[16];

  MagickBooleanType
    status;

  PixelInterpolateMethod
    interpolate;

  PixelTrait
    traits;

  const Quantum
    *magick_restrict p;

  ssize_t
    i;

  ssize_t
    x_offset,
    y_offset;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image_view != (CacheView *) NULL);
  status=MagickTrue;
  *pixel=0.0;
  traits=GetPixelChannelTraits(image,channel);
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  /* UndefinedInterpolatePixel means "use the image's configured method". */
  interpolate=method;
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=image->interpolate;
  switch (interpolate)
  {
    case AverageInterpolatePixel:  /* nearest 4 neighbours */
    case Average9InterpolatePixel:  /* nearest 9 neighbours */
    case Average16InterpolatePixel:  /* nearest 16 neighbours */
    {
      ssize_t
        count;

      count=2;  /* size of the area to average - default nearest 4 */
      if
(interpolate == Average9InterpolatePixel) { count=3; x_offset=CastDoubleToLong(floor(x+0.5)-1.0); y_offset=CastDoubleToLong(floor(y+0.5)-1.0); } else if (interpolate == Average16InterpolatePixel) { count=4; x_offset--; y_offset--; } p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,(size_t) count, (size_t) count,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } count*=count; /* Number of pixels to average */ if ((traits & BlendPixelTrait) == 0) for (i=0; i < (ssize_t) count; i++) { alpha[i]=1.0; pixels[i]=(double) p[i*GetPixelChannels(image)+channel]; } else for (i=0; i < (ssize_t) count; i++) { alpha[i]=QuantumScale*GetPixelAlpha(image,p+i* GetPixelChannels(image)); pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel]; } for (i=0; i < (ssize_t) count; i++) { gamma=PerceptibleReciprocal(alpha[i])/count; *pixel+=gamma*pixels[i]; } break; } case BilinearInterpolatePixel: default: { PointInfo delta, epsilon; p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } if ((traits & BlendPixelTrait) == 0) for (i=0; i < 4; i++) { alpha[i]=1.0; pixels[i]=(double) p[i*GetPixelChannels(image)+channel]; } else for (i=0; i < 4; i++) { alpha[i]=QuantumScale*GetPixelAlpha(image,p+i* GetPixelChannels(image)); pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel]; } delta.x=x-x_offset; delta.y=y-y_offset; epsilon.x=1.0-delta.x; epsilon.y=1.0-delta.y; gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y* (epsilon.x*alpha[2]+delta.x*alpha[3]))); gamma=PerceptibleReciprocal(gamma); *pixel=gamma*(epsilon.y*(epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y* (epsilon.x*pixels[2]+delta.x*pixels[3])); break; } case BlendInterpolatePixel: { p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } if ((traits & BlendPixelTrait) == 0) for (i=0; i < 4; i++) { alpha[i]=1.0; 
pixels[i]=(MagickRealType) p[i*GetPixelChannels(image)+channel]; } else for (i=0; i < 4; i++) { alpha[i]=QuantumScale*GetPixelAlpha(image,p+i* GetPixelChannels(image)); pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel]; } gamma=1.0; /* number of pixels blended together (its variable) */ for (i=0; i <= 1L; i++) { if ((y-y_offset) >= 0.75) { alpha[i]=alpha[i+2]; /* take right pixels */ pixels[i]=pixels[i+2]; } else if ((y-y_offset) > 0.25) { gamma=2.0; /* blend both pixels in row */ alpha[i]+=alpha[i+2]; /* add up alpha weights */ pixels[i]+=pixels[i+2]; } } if ((x-x_offset) >= 0.75) { alpha[0]=alpha[1]; /* take bottom row blend */ pixels[0]=pixels[1]; } else if ((x-x_offset) > 0.25) { gamma*=2.0; /* blend both rows */ alpha[0]+=alpha[1]; /* add up alpha weights */ pixels[0]+=pixels[1]; } if (channel != AlphaPixelChannel) gamma=PerceptibleReciprocal(alpha[0]); /* (color) 1/alpha_weights */ else gamma=PerceptibleReciprocal(gamma); /* (alpha) 1/number_of_pixels */ *pixel=gamma*pixels[0]; break; } case CatromInterpolatePixel: { double cx[4], cy[4]; p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } if ((traits & BlendPixelTrait) == 0) for (i=0; i < 16; i++) { alpha[i]=1.0; pixels[i]=(double) p[i*GetPixelChannels(image)+channel]; } else for (i=0; i < 16; i++) { alpha[i]=QuantumScale*GetPixelAlpha(image,p+i* GetPixelChannels(image)); pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel]; } CatromWeights((double) (x-x_offset),&cx); CatromWeights((double) (y-y_offset),&cy); gamma=(channel == AlphaPixelChannel ? 
(double) 1.0 : PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]* alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]* alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]* alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+ cx[2]*alpha[14]+cx[3]*alpha[15]))); *pixel=gamma*(cy[0]*(cx[0]*pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+ cx[3]*pixels[3])+cy[1]*(cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]* pixels[6]+cx[3]*pixels[7])+cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+ cx[2]*pixels[10]+cx[3]*pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]* pixels[13]+cx[2]*pixels[14]+cx[3]*pixels[15])); break; } case IntegerInterpolatePixel: { p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } *pixel=(double) GetPixelChannel(image,channel,p); break; } case NearestInterpolatePixel: { x_offset=CastDoubleToLong(floor(x+0.5)); y_offset=CastDoubleToLong(floor(y+0.5)); p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } *pixel=(double) GetPixelChannel(image,channel,p); break; } case MeshInterpolatePixel: { PointInfo delta, luminance; p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } if ((traits & BlendPixelTrait) == 0) for (i=0; i < 4; i++) { alpha[i]=1.0; pixels[i]=(double) p[i*GetPixelChannels(image)+channel]; } else for (i=0; i < 4; i++) { alpha[i]=QuantumScale*GetPixelAlpha(image,p+i* GetPixelChannels(image)); pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel]; } delta.x=x-x_offset; delta.y=y-y_offset; luminance.x=GetPixelLuma(image,p)-(double) GetPixelLuma(image,p+3*GetPixelChannels(image)); luminance.y=GetPixelLuma(image,p+GetPixelChannels(image))-(double) GetPixelLuma(image,p+2*GetPixelChannels(image)); if (fabs((double) luminance.x) < fabs((double) luminance.y)) { /* Diagonal 0-3 
NW-SE. */ if (delta.x <= delta.y) { /* Bottom-left triangle (pixel: 2, diagonal: 0-3). */ delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]); gamma=PerceptibleReciprocal(gamma); *pixel=gamma*MeshInterpolate(&delta,pixels[2],pixels[3], pixels[0]); } else { /* Top-right triangle (pixel: 1, diagonal: 0-3). */ delta.x=1.0-delta.x; gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]); gamma=PerceptibleReciprocal(gamma); *pixel=gamma*MeshInterpolate(&delta,pixels[1],pixels[0], pixels[3]); } } else { /* Diagonal 1-2 NE-SW. */ if (delta.x <= (1.0-delta.y)) { /* Top-left triangle (pixel: 0, diagonal: 1-2). */ gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]); gamma=PerceptibleReciprocal(gamma); *pixel=gamma*MeshInterpolate(&delta,pixels[0],pixels[1], pixels[2]); } else { /* Bottom-right triangle (pixel: 3, diagonal: 1-2). */ delta.x=1.0-delta.x; delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]); gamma=PerceptibleReciprocal(gamma); *pixel=gamma*MeshInterpolate(&delta,pixels[3],pixels[2], pixels[1]); } } break; } case SplineInterpolatePixel: { double cx[4], cy[4]; p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } if ((traits & BlendPixelTrait) == 0) for (i=0; i < 16; i++) { alpha[i]=1.0; pixels[i]=(double) p[i*GetPixelChannels(image)+channel]; } else for (i=0; i < 16; i++) { alpha[i]=QuantumScale*GetPixelAlpha(image,p+i* GetPixelChannels(image)); pixels[i]=alpha[i]*p[i*GetPixelChannels(image)+channel]; } SplineWeights((double) (x-x_offset),&cx); SplineWeights((double) (y-y_offset),&cy); gamma=(channel == AlphaPixelChannel ? 
(double) 1.0 : PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]* alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]* alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]* alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+ cx[2]*alpha[14]+cx[3]*alpha[15]))); *pixel=gamma*(cy[0]*(cx[0]*pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+ cx[3]*pixels[3])+cy[1]*(cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]* pixels[6]+cx[3]*pixels[7])+cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+ cx[2]*pixels[10]+cx[3]*pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]* pixels[13]+cx[2]*pixels[14]+cx[3]*pixels[15])); break; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p o l a t e P i x e l C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpolatePixelChannels() applies a pixel interpolation method between a % floating point coordinate and the pixels surrounding that coordinate. No % pixel area resampling, or scaling of the result is performed. % % Interpolation is restricted to just the current channel setting of the % destination image into which the color is to be stored % % The format of the InterpolatePixelChannels method is: % % MagickBooleanType InterpolatePixelChannels( % const Image *magick_restrict source,const CacheView *source_view, % const Image *magick_restrict destination, % const PixelInterpolateMethod method,const double x,const double y, % Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o source: the source. % % o source_view: the source view. % % o destination: the destination image, for the interpolated color % % o method: the pixel color interpolation method. % % o x,y: A double representing the current (x,y) position of the pixel. % % o pixel: return the interpolated pixel here. % % o exception: return any errors or warnings in this structure. 
%
*/

MagickExport MagickBooleanType InterpolatePixelChannels(
  const Image *magick_restrict source,const CacheView_ *source_view,
  const Image *magick_restrict destination,const PixelInterpolateMethod method,
  const double x,const double y,Quantum *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  double
    alpha[16],
    gamma,
    pixels[16];

  const Quantum
    *magick_restrict p;

  ssize_t
    i;

  ssize_t
    x_offset,
    y_offset;

  PixelInterpolateMethod
    interpolate;

  assert(source != (Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(source_view != (CacheView *) NULL);
  status=MagickTrue;
  /*
    Top-left pixel of the interpolation neighborhood.
  */
  x_offset=CastDoubleToLong(floor(x));
  y_offset=CastDoubleToLong(floor(y));
  interpolate=method;
  if (interpolate == UndefinedInterpolatePixel)
    interpolate=source->interpolate;
  switch (interpolate)
  {
    case AverageInterpolatePixel:  /* nearest 4 neighbours */
    case Average9InterpolatePixel:  /* nearest 9 neighbours */
    case Average16InterpolatePixel:  /* nearest 16 neighbours */
    {
      ssize_t
        count;

      count=2;  /* size of the area to average - default nearest 4 */
      if (interpolate == Average9InterpolatePixel)
        {
          /*
            Odd-sized area: center the 3x3 window on the rounded coordinate.
          */
          count=3;
          x_offset=CastDoubleToLong(floor(x+0.5)-1.0);
          y_offset=CastDoubleToLong(floor(y+0.5)-1.0);
        }
      else
        if (interpolate == Average16InterpolatePixel)
          {
            count=4;
            x_offset--;
            y_offset--;
          }
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,(size_t) count,
        (size_t) count,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      count*=count;  /* Number of pixels to average */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        double
          sum;

        ssize_t
          j;

        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        /*
          Skip channels undefined in either the source or the destination.
        */
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        for (j=0; j < (ssize_t) count; j++)
          pixels[j]=(double) p[j*GetPixelChannels(source)+i];
        sum=0.0;
        if ((traits & BlendPixelTrait) == 0)
          {
            /*
              Plain average; no alpha weighting.
            */
            for (j=0; j < (ssize_t) count; j++)
              sum+=pixels[j];
            sum/=count;
            SetPixelChannel(destination,channel,ClampToQuantum(sum),pixel);
            continue;
          }
        for (j=0; j < (ssize_t) count; j++)
        {
          alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
            GetPixelChannels(source));
          pixels[j]*=alpha[j];
          gamma=PerceptibleReciprocal(alpha[j]);
          sum+=gamma*pixels[j];
        }
        sum/=count;
        SetPixelChannel(destination,channel,ClampToQuantum(sum),pixel);
      }
      break;
    }
    case BilinearInterpolatePixel:
    default:
    {
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PointInfo
          delta,
          epsilon;

        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        delta.x=x-x_offset;
        delta.y=y-y_offset;
        epsilon.x=1.0-delta.x;
        epsilon.y=1.0-delta.y;
        pixels[0]=(double) p[i];
        pixels[1]=(double) p[GetPixelChannels(source)+i];
        pixels[2]=(double) p[2*GetPixelChannels(source)+i];
        pixels[3]=(double) p[3*GetPixelChannels(source)+i];
        if ((traits & BlendPixelTrait) == 0)
          {
            /*
              Unit alpha weights: gamma reduces to the reciprocal of the
              bilinear weight sum.
            */
            gamma=((epsilon.y*(epsilon.x+delta.x)+delta.y*(epsilon.x+
              delta.x)));
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(destination,channel,ClampToQuantum(gamma*
              (epsilon.y*(epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*
              (epsilon.x*pixels[2]+delta.x*pixels[3]))),pixel);
            continue;
          }
        alpha[0]=QuantumScale*GetPixelAlpha(source,p);
        alpha[1]=QuantumScale*GetPixelAlpha(source,p+GetPixelChannels(source));
        alpha[2]=QuantumScale*GetPixelAlpha(source,p+2*
          GetPixelChannels(source));
        alpha[3]=QuantumScale*GetPixelAlpha(source,p+3*
          GetPixelChannels(source));
        pixels[0]*=alpha[0];
        pixels[1]*=alpha[1];
        pixels[2]*=alpha[2];
        pixels[3]*=alpha[3];
        gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y*
          (epsilon.x*alpha[2]+delta.x*alpha[3])));
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(epsilon.y*
          (epsilon.x*pixels[0]+delta.x*pixels[1])+delta.y*(epsilon.x*pixels[2]+
          delta.x*pixels[3]))),pixel);
      }
      break;
    }
    case BlendInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;

        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if (source->alpha_trait != BlendPixelTrait)
          for (j=0; j < 4; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 4; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
            if (channel != AlphaPixelChannel)
              pixels[j]*=alpha[j];
          }
        gamma=1.0;  /* number of pixels blended together (its variable) */
        for (j=0; j <= 1L; j++)
        {
          if ((y-y_offset) >= 0.75)
            {
              alpha[j]=alpha[j+2];  /* take bottom pair of pixels */
              pixels[j]=pixels[j+2];
            }
          else
            if ((y-y_offset) > 0.25)
              {
                gamma=2.0;  /* blend both pixels in column */
                alpha[j]+=alpha[j+2];  /* add up alpha weights */
                pixels[j]+=pixels[j+2];
              }
        }
        if ((x-x_offset) >= 0.75)
          {
            alpha[0]=alpha[1];  /* take right pixel of the blended row */
            pixels[0]=pixels[1];
          }
        else
          if ((x-x_offset) > 0.25)
            {
              gamma*=2.0;  /* blend both rows */
              alpha[0]+=alpha[1];  /* add up alpha weights */
              pixels[0]+=pixels[1];
            }
        if (channel != AlphaPixelChannel)
          gamma=PerceptibleReciprocal(alpha[0]);  /* (color) 1/alpha_weights */
        else
          gamma=PerceptibleReciprocal(gamma);  /* (alpha) 1/number_of_pixels */
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*pixels[0]),
          pixel);
      }
      break;
    }
    case CatromInterpolatePixel:
    {
      double
        cx[4],
        cy[4];

      /*
        4x4 neighborhood centered on the unit cell containing (x,y).
      */
      p=GetCacheViewVirtualPixels(source_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;

        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if ((traits & BlendPixelTrait) == 0)
          for (j=0; j < 16; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 16; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=alpha[j]*p[j*GetPixelChannels(source)+i];
          }
        CatromWeights((double) (x-x_offset),&cx);
        CatromWeights((double) (y-y_offset),&cy);
        /*
          NOTE(review): gamma is forced to 1.0 for blended traits here,
          whereas InterpolatePixelChannel() forces it to 1.0 only for the
          alpha channel -- confirm this asymmetry is intentional.
        */
        gamma=((traits & BlendPixelTrait) ? (double) (1.0) :
          PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
          alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
          alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
          alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
          cx[2]*alpha[14]+cx[3]*alpha[15])));
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(cy[0]*(cx[0]*
          pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+cx[3]*pixels[3])+cy[1]*
          (cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*pixels[6]+cx[3]*pixels[7])+
          cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+cx[2]*pixels[10]+cx[3]*
          pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*pixels[13]+cx[2]*
          pixels[14]+cx[3]*pixels[15]))),pixel);
      }
      break;
    }
    case IntegerInterpolatePixel:
    {
      /*
        Truncate to the containing pixel; copy channels straight through.
      */
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],pixel);
      }
      break;
    }
    case NearestInterpolatePixel:
    {
      /*
        Round to the nearest pixel center; copy channels straight through.
      */
      x_offset=CastDoubleToLong(floor(x+0.5));
      y_offset=CastDoubleToLong(floor(y+0.5));
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,1,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],pixel);
      }
      break;
    }
    case MeshInterpolatePixel:
    {
      p=GetCacheViewVirtualPixels(source_view,x_offset,y_offset,2,2,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PointInfo
          delta,
          luminance;

        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        pixels[0]=(double) p[i];
        pixels[1]=(double) p[GetPixelChannels(source)+i];
        pixels[2]=(double) p[2*GetPixelChannels(source)+i];
        pixels[3]=(double) p[3*GetPixelChannels(source)+i];
        if ((traits & BlendPixelTrait) == 0)
          {
            alpha[0]=1.0;
            alpha[1]=1.0;
            alpha[2]=1.0;
            alpha[3]=1.0;
          }
        else
          {
            alpha[0]=QuantumScale*GetPixelAlpha(source,p);
            alpha[1]=QuantumScale*GetPixelAlpha(source,p+
              GetPixelChannels(source));
            alpha[2]=QuantumScale*GetPixelAlpha(source,p+2*
              GetPixelChannels(source));
            alpha[3]=QuantumScale*GetPixelAlpha(source,p+3*
              GetPixelChannels(source));
          }
        delta.x=x-x_offset;
        delta.y=y-y_offset;
        /*
          Split the 2x2 cell along whichever diagonal has the smaller
          luminance difference, then interpolate inside the triangle that
          contains (x,y).
        */
        luminance.x=fabs((double) (GetPixelLuma(source,p)-
          GetPixelLuma(source,p+3*GetPixelChannels(source))));
        luminance.y=fabs((double) (GetPixelLuma(source,p+
          GetPixelChannels(source))-GetPixelLuma(source,p+2*
          GetPixelChannels(source))));
        if (luminance.x < luminance.y)
          {
            /*
              Diagonal 0-3 NW-SE.
            */
            if (delta.x <= delta.y)
              {
                /*
                  Bottom-left triangle (pixel: 2, diagonal: 0-3).
                */
                delta.y=1.0-delta.y;
                gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[2],pixels[3],pixels[0])),
                  pixel);
              }
            else
              {
                /*
                  Top-right triangle (pixel: 1, diagonal: 0-3).
                */
                delta.x=1.0-delta.x;
                gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[1],pixels[0],pixels[3])),
                  pixel);
              }
          }
        else
          {
            /*
              Diagonal 1-2 NE-SW.
            */
            if (delta.x <= (1.0-delta.y))
              {
                /*
                  Top-left triangle (pixel: 0, diagonal: 1-2).
                */
                gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[0],pixels[1],pixels[2])),
                  pixel);
              }
            else
              {
                /*
                  Bottom-right triangle (pixel: 3, diagonal: 1-2).
                */
                delta.x=1.0-delta.x;
                delta.y=1.0-delta.y;
                gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]);
                gamma=PerceptibleReciprocal(gamma);
                SetPixelChannel(destination,channel,ClampToQuantum(gamma*
                  MeshInterpolate(&delta,pixels[3],pixels[2],pixels[1])),
                  pixel);
              }
          }
      }
      break;
    }
    case SplineInterpolatePixel:
    {
      double
        cx[4],
        cy[4];

      p=GetCacheViewVirtualPixels(source_view,x_offset-1,y_offset-1,4,4,
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        ssize_t
          j;

        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait traits = GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        if ((traits & BlendPixelTrait) == 0)
          for (j=0; j < 16; j++)
          {
            alpha[j]=1.0;
            pixels[j]=(double) p[j*GetPixelChannels(source)+i];
          }
        else
          for (j=0; j < 16; j++)
          {
            alpha[j]=QuantumScale*GetPixelAlpha(source,p+j*
              GetPixelChannels(source));
            pixels[j]=alpha[j]*p[j*GetPixelChannels(source)+i];
          }
        SplineWeights((double) (x-x_offset),&cx);
        SplineWeights((double) (y-y_offset),&cy);
        gamma=((traits & BlendPixelTrait) ? (double) (1.0) :
          PerceptibleReciprocal(cy[0]*(cx[0]*alpha[0]+cx[1]*alpha[1]+cx[2]*
          alpha[2]+cx[3]*alpha[3])+cy[1]*(cx[0]*alpha[4]+cx[1]*alpha[5]+cx[2]*
          alpha[6]+cx[3]*alpha[7])+cy[2]*(cx[0]*alpha[8]+cx[1]*alpha[9]+cx[2]*
          alpha[10]+cx[3]*alpha[11])+cy[3]*(cx[0]*alpha[12]+cx[1]*alpha[13]+
          cx[2]*alpha[14]+cx[3]*alpha[15])));
        SetPixelChannel(destination,channel,ClampToQuantum(gamma*(cy[0]*(cx[0]*
          pixels[0]+cx[1]*pixels[1]+cx[2]*pixels[2]+cx[3]*pixels[3])+cy[1]*
          (cx[0]*pixels[4]+cx[1]*pixels[5]+cx[2]*pixels[6]+cx[3]*pixels[7])+
          cy[2]*(cx[0]*pixels[8]+cx[1]*pixels[9]+cx[2]*pixels[10]+cx[3]*
          pixels[11])+cy[3]*(cx[0]*pixels[12]+cx[1]*pixels[13]+cx[2]*
          pixels[14]+cx[3]*pixels[15]))),pixel);
      }
      break;
    }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t e P i x e l I n f o                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolatePixelInfo() applies a pixel interpolation method between a
%  floating point coordinate and the pixels surrounding that coordinate.  No
%  pixel area resampling, or scaling of the result is performed.
%
%  Interpolation is restricted to just RGBKA channels.
%
%  The format of the InterpolatePixelInfo method is:
%
%      MagickBooleanType InterpolatePixelInfo(const Image *image,
%        const CacheView *image_view,const PixelInterpolateMethod method,
%        const double x,const double y,PixelInfo *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o image_view: the image view.
%
%    o method: the pixel color interpolation method.
%
%    o x,y: A double representing the current (x,y) position of the pixel.
%
%    o pixel: return the interpolated pixel here.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline void AlphaBlendPixelInfo(const Image *image, const Quantum *pixel,PixelInfo *pixel_info,double *alpha) { if (image->alpha_trait == UndefinedPixelTrait) { *alpha=1.0; pixel_info->red=(double) GetPixelRed(image,pixel); pixel_info->green=(double) GetPixelGreen(image,pixel); pixel_info->blue=(double) GetPixelBlue(image,pixel); pixel_info->black=0.0; if (image->colorspace == CMYKColorspace) pixel_info->black=(double) GetPixelBlack(image,pixel); pixel_info->alpha=(double) GetPixelAlpha(image,pixel); return; } *alpha=QuantumScale*GetPixelAlpha(image,pixel); pixel_info->red=(*alpha*GetPixelRed(image,pixel)); pixel_info->green=(*alpha*GetPixelGreen(image,pixel)); pixel_info->blue=(*alpha*GetPixelBlue(image,pixel)); pixel_info->black=0.0; if (image->colorspace == CMYKColorspace) pixel_info->black=(*alpha*GetPixelBlack(image,pixel)); pixel_info->alpha=(double) GetPixelAlpha(image,pixel); } MagickExport MagickBooleanType InterpolatePixelInfo(const Image *image, const CacheView_ *image_view,const PixelInterpolateMethod method, const double x,const double y,PixelInfo *pixel,ExceptionInfo *exception) { MagickBooleanType status; double alpha[16], gamma; PixelInfo pixels[16]; const Quantum *p; ssize_t i; ssize_t x_offset, y_offset; PixelInterpolateMethod interpolate; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image_view != (CacheView *) NULL); status=MagickTrue; x_offset=CastDoubleToLong(floor(x)); y_offset=CastDoubleToLong(floor(y)); interpolate=method; if (interpolate == UndefinedInterpolatePixel) interpolate=image->interpolate; GetPixelInfoPixel(image,(const Quantum *) NULL,pixel); (void) memset(&pixels,0,sizeof(pixels)); switch (interpolate) { case AverageInterpolatePixel: /* nearest 4 neighbours */ case Average9InterpolatePixel: /* nearest 9 neighbours */ case Average16InterpolatePixel: /* nearest 16 neighbours */ { ssize_t count; count=2; /* size of the area to average - default nearest 4 */ if (interpolate == 
Average9InterpolatePixel) { count=3; x_offset=CastDoubleToLong(floor(x+0.5)-1.0); y_offset=CastDoubleToLong(floor(y+0.5)-1.0); } else if (interpolate == Average16InterpolatePixel) { count=4; x_offset--; y_offset--; } p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,(size_t) count, (size_t) count,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } count*=count; /* number of pixels - square of size */ for (i=0; i < (ssize_t) count; i++) { AlphaBlendPixelInfo(image,p,pixels,alpha); gamma=PerceptibleReciprocal(alpha[0]); pixel->red+=gamma*pixels[0].red; pixel->green+=gamma*pixels[0].green; pixel->blue+=gamma*pixels[0].blue; pixel->black+=gamma*pixels[0].black; pixel->alpha+=pixels[0].alpha; p += GetPixelChannels(image); } gamma=1.0/count; /* average weighting of each pixel in area */ pixel->red*=gamma; pixel->green*=gamma; pixel->blue*=gamma; pixel->black*=gamma; pixel->alpha*=gamma; break; } case BackgroundInterpolatePixel: { *pixel=image->background_color; /* Copy PixelInfo Structure */ break; } case BilinearInterpolatePixel: default: { PointInfo delta, epsilon; p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (i=0; i < 4L; i++) AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i); delta.x=x-x_offset; delta.y=y-y_offset; epsilon.x=1.0-delta.x; epsilon.y=1.0-delta.y; gamma=((epsilon.y*(epsilon.x*alpha[0]+delta.x*alpha[1])+delta.y* (epsilon.x*alpha[2]+delta.x*alpha[3]))); gamma=PerceptibleReciprocal(gamma); pixel->red=gamma*(epsilon.y*(epsilon.x*pixels[0].red+delta.x* pixels[1].red)+delta.y*(epsilon.x*pixels[2].red+delta.x*pixels[3].red)); pixel->green=gamma*(epsilon.y*(epsilon.x*pixels[0].green+delta.x* pixels[1].green)+delta.y*(epsilon.x*pixels[2].green+delta.x* pixels[3].green)); pixel->blue=gamma*(epsilon.y*(epsilon.x*pixels[0].blue+delta.x* pixels[1].blue)+delta.y*(epsilon.x*pixels[2].blue+delta.x* pixels[3].blue)); if 
(image->colorspace == CMYKColorspace) pixel->black=gamma*(epsilon.y*(epsilon.x*pixels[0].black+delta.x* pixels[1].black)+delta.y*(epsilon.x*pixels[2].black+delta.x* pixels[3].black)); gamma=((epsilon.y*(epsilon.x+delta.x)+delta.y*(epsilon.x+delta.x))); gamma=PerceptibleReciprocal(gamma); pixel->alpha=gamma*(epsilon.y*(epsilon.x*pixels[0].alpha+delta.x* pixels[1].alpha)+delta.y*(epsilon.x*pixels[2].alpha+delta.x* pixels[3].alpha)); break; } case BlendInterpolatePixel: { p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (i=0; i < 4L; i++) { GetPixelInfoPixel(image,p+i*GetPixelChannels(image),pixels+i); AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i); } gamma=1.0; /* number of pixels blended together (its variable) */ for (i=0; i <= 1L; i++) { if ((y-y_offset) >= 0.75) { alpha[i]=alpha[i+2]; /* take right pixels */ pixels[i]=pixels[i+2]; } else if ((y-y_offset) > 0.25) { gamma=2.0; /* blend both pixels in row */ alpha[i]+=alpha[i+2]; /* add up alpha weights */ pixels[i].red+=pixels[i+2].red; pixels[i].green+=pixels[i+2].green; pixels[i].blue+=pixels[i+2].blue; pixels[i].black+=pixels[i+2].black; pixels[i].alpha+=pixels[i+2].alpha; } } if ((x-x_offset) >= 0.75) { alpha[0]=alpha[1]; pixels[0]=pixels[1]; } else if ((x-x_offset) > 0.25) { gamma*=2.0; /* blend both rows */ alpha[0]+= alpha[1]; /* add up alpha weights */ pixels[0].red+=pixels[1].red; pixels[0].green+=pixels[1].green; pixels[0].blue+=pixels[1].blue; pixels[0].black+=pixels[1].black; pixels[0].alpha+=pixels[1].alpha; } gamma=1.0/gamma; alpha[0]=PerceptibleReciprocal(alpha[0]); pixel->red=alpha[0]*pixels[0].red; pixel->green=alpha[0]*pixels[0].green; /* divide by sum of alpha */ pixel->blue=alpha[0]*pixels[0].blue; pixel->black=alpha[0]*pixels[0].black; pixel->alpha=gamma*pixels[0].alpha; /* divide by number of pixels */ break; } case CatromInterpolatePixel: { double cx[4], cy[4]; 
p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (i=0; i < 16L; i++) AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i); CatromWeights((double) (x-x_offset),&cx); CatromWeights((double) (y-y_offset),&cy); pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]* pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]* pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]* pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]* pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]* pixels[14].red+cx[3]*pixels[15].red)); pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]* pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+ cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+ cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]* pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]* pixels[12].green+cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]* pixels[15].green)); pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]* pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]* pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]* pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]* pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+ cx[2]*pixels[14].blue+cx[3]*pixels[15].blue)); if (image->colorspace == CMYKColorspace) pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]* pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+ cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+ cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]* pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]* pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]* pixels[15].black)); 
pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]* pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+ cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+ cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]* pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+ cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha)); break; } case IntegerInterpolatePixel: { p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } GetPixelInfoPixel(image,p,pixel); break; } case MeshInterpolatePixel: { PointInfo delta, luminance; p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } delta.x=x-x_offset; delta.y=y-y_offset; luminance.x=GetPixelLuma(image,p)-(double) GetPixelLuma(image,p+3*GetPixelChannels(image)); luminance.y=GetPixelLuma(image,p+GetPixelChannels(image))-(double) GetPixelLuma(image,p+2*GetPixelChannels(image)); AlphaBlendPixelInfo(image,p,pixels+0,alpha+0); AlphaBlendPixelInfo(image,p+GetPixelChannels(image),pixels+1,alpha+1); AlphaBlendPixelInfo(image,p+2*GetPixelChannels(image),pixels+2,alpha+2); AlphaBlendPixelInfo(image,p+3*GetPixelChannels(image),pixels+3,alpha+3); if (fabs((double) luminance.x) < fabs((double) luminance.y)) { /* Diagonal 0-3 NW-SE. */ if (delta.x <= delta.y) { /* Bottom-left triangle (pixel: 2, diagonal: 0-3). 
*/ delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]); gamma=PerceptibleReciprocal(gamma); pixel->red=gamma*MeshInterpolate(&delta,pixels[2].red, pixels[3].red,pixels[0].red); pixel->green=gamma*MeshInterpolate(&delta,pixels[2].green, pixels[3].green,pixels[0].green); pixel->blue=gamma*MeshInterpolate(&delta,pixels[2].blue, pixels[3].blue,pixels[0].blue); if (image->colorspace == CMYKColorspace) pixel->black=gamma*MeshInterpolate(&delta,pixels[2].black, pixels[3].black,pixels[0].black); gamma=MeshInterpolate(&delta,1.0,1.0,1.0); pixel->alpha=gamma*MeshInterpolate(&delta,pixels[2].alpha, pixels[3].alpha,pixels[0].alpha); } else { /* Top-right triangle (pixel:1 , diagonal: 0-3). */ delta.x=1.0-delta.x; gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]); gamma=PerceptibleReciprocal(gamma); pixel->red=gamma*MeshInterpolate(&delta,pixels[1].red, pixels[0].red,pixels[3].red); pixel->green=gamma*MeshInterpolate(&delta,pixels[1].green, pixels[0].green,pixels[3].green); pixel->blue=gamma*MeshInterpolate(&delta,pixels[1].blue, pixels[0].blue,pixels[3].blue); if (image->colorspace == CMYKColorspace) pixel->black=gamma*MeshInterpolate(&delta,pixels[1].black, pixels[0].black,pixels[3].black); gamma=MeshInterpolate(&delta,1.0,1.0,1.0); pixel->alpha=gamma*MeshInterpolate(&delta,pixels[1].alpha, pixels[0].alpha,pixels[3].alpha); } } else { /* Diagonal 1-2 NE-SW. */ if (delta.x <= (1.0-delta.y)) { /* Top-left triangle (pixel: 0, diagonal: 1-2). 
*/ gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]); gamma=PerceptibleReciprocal(gamma); pixel->red=gamma*MeshInterpolate(&delta,pixels[0].red, pixels[1].red,pixels[2].red); pixel->green=gamma*MeshInterpolate(&delta,pixels[0].green, pixels[1].green,pixels[2].green); pixel->blue=gamma*MeshInterpolate(&delta,pixels[0].blue, pixels[1].blue,pixels[2].blue); if (image->colorspace == CMYKColorspace) pixel->black=gamma*MeshInterpolate(&delta,pixels[0].black, pixels[1].black,pixels[2].black); gamma=MeshInterpolate(&delta,1.0,1.0,1.0); pixel->alpha=gamma*MeshInterpolate(&delta,pixels[0].alpha, pixels[1].alpha,pixels[2].alpha); } else { /* Bottom-right triangle (pixel: 3, diagonal: 1-2). */ delta.x=1.0-delta.x; delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]); gamma=PerceptibleReciprocal(gamma); pixel->red=gamma*MeshInterpolate(&delta,pixels[3].red, pixels[2].red,pixels[1].red); pixel->green=gamma*MeshInterpolate(&delta,pixels[3].green, pixels[2].green,pixels[1].green); pixel->blue=gamma*MeshInterpolate(&delta,pixels[3].blue, pixels[2].blue,pixels[1].blue); if (image->colorspace == CMYKColorspace) pixel->black=gamma*MeshInterpolate(&delta,pixels[3].black, pixels[2].black,pixels[1].black); gamma=MeshInterpolate(&delta,1.0,1.0,1.0); pixel->alpha=gamma*MeshInterpolate(&delta,pixels[3].alpha, pixels[2].alpha,pixels[1].alpha); } } break; } case NearestInterpolatePixel: { x_offset=CastDoubleToLong(floor(x+0.5)); y_offset=CastDoubleToLong(floor(y+0.5)); p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset,1,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } GetPixelInfoPixel(image,p,pixel); break; } case SplineInterpolatePixel: { double cx[4], cy[4]; p=GetCacheViewVirtualPixels(image_view,x_offset-1,y_offset-1,4,4, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (i=0; i < 16L; i++) AlphaBlendPixelInfo(image,p+i*GetPixelChannels(image),pixels+i,alpha+i); SplineWeights((double) 
(x-x_offset),&cx); SplineWeights((double) (y-y_offset),&cy); pixel->red=(cy[0]*(cx[0]*pixels[0].red+cx[1]*pixels[1].red+cx[2]* pixels[2].red+cx[3]*pixels[3].red)+cy[1]*(cx[0]*pixels[4].red+cx[1]* pixels[5].red+cx[2]*pixels[6].red+cx[3]*pixels[7].red)+cy[2]*(cx[0]* pixels[8].red+cx[1]*pixels[9].red+cx[2]*pixels[10].red+cx[3]* pixels[11].red)+cy[3]*(cx[0]*pixels[12].red+cx[1]*pixels[13].red+cx[2]* pixels[14].red+cx[3]*pixels[15].red)); pixel->green=(cy[0]*(cx[0]*pixels[0].green+cx[1]*pixels[1].green+cx[2]* pixels[2].green+cx[3]*pixels[3].green)+cy[1]*(cx[0]*pixels[4].green+ cx[1]*pixels[5].green+cx[2]*pixels[6].green+cx[3]*pixels[7].green)+ cy[2]*(cx[0]*pixels[8].green+cx[1]*pixels[9].green+cx[2]* pixels[10].green+cx[3]*pixels[11].green)+cy[3]*(cx[0]*pixels[12].green+ cx[1]*pixels[13].green+cx[2]*pixels[14].green+cx[3]*pixels[15].green)); pixel->blue=(cy[0]*(cx[0]*pixels[0].blue+cx[1]*pixels[1].blue+cx[2]* pixels[2].blue+cx[3]*pixels[3].blue)+cy[1]*(cx[0]*pixels[4].blue+cx[1]* pixels[5].blue+cx[2]*pixels[6].blue+cx[3]*pixels[7].blue)+cy[2]*(cx[0]* pixels[8].blue+cx[1]*pixels[9].blue+cx[2]*pixels[10].blue+cx[3]* pixels[11].blue)+cy[3]*(cx[0]*pixels[12].blue+cx[1]*pixels[13].blue+ cx[2]*pixels[14].blue+cx[3]*pixels[15].blue)); if (image->colorspace == CMYKColorspace) pixel->black=(cy[0]*(cx[0]*pixels[0].black+cx[1]*pixels[1].black+cx[2]* pixels[2].black+cx[3]*pixels[3].black)+cy[1]*(cx[0]*pixels[4].black+ cx[1]*pixels[5].black+cx[2]*pixels[6].black+cx[3]*pixels[7].black)+ cy[2]*(cx[0]*pixels[8].black+cx[1]*pixels[9].black+cx[2]* pixels[10].black+cx[3]*pixels[11].black)+cy[3]*(cx[0]* pixels[12].black+cx[1]*pixels[13].black+cx[2]*pixels[14].black+cx[3]* pixels[15].black)); pixel->alpha=(cy[0]*(cx[0]*pixels[0].alpha+cx[1]*pixels[1].alpha+cx[2]* pixels[2].alpha+cx[3]*pixels[3].alpha)+cy[1]*(cx[0]*pixels[4].alpha+ cx[1]*pixels[5].alpha+cx[2]*pixels[6].alpha+cx[3]*pixels[7].alpha)+ cy[2]*(cx[0]*pixels[8].alpha+cx[1]*pixels[9].alpha+cx[2]* 
pixels[10].alpha+cx[3]*pixels[11].alpha)+cy[3]*(cx[0]*pixels[12].alpha+ cx[1]*pixels[13].alpha+cx[2]*pixels[14].alpha+cx[3]*pixels[15].alpha)); break; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I s F u z z y E q u i v a l e n c e P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsFuzzyEquivalencePixel() returns MagickTrue if the distance between two % pixels is less than the specified distance in a linear three (or four) % dimensional color space. % % The format of the IsFuzzyEquivalencePixel method is: % % void IsFuzzyEquivalencePixel(const Image *source,const Quantum *p, % const Image *destination,const Quantum *q) % % A description of each parameter follows: % % o source: the source image. % % o p: Pixel p. % % o destination: the destination image. % % o q: Pixel q. % */ MagickExport MagickBooleanType IsFuzzyEquivalencePixel(const Image *source, const Quantum *p,const Image *destination,const Quantum *q) { double distance, fuzz, pixel, scale; fuzz=GetFuzzyColorDistance(source,destination); scale=1.0; distance=0.0; if ((source->alpha_trait != UndefinedPixelTrait) || (destination->alpha_trait != UndefinedPixelTrait)) { /* Transparencies are involved - set alpha distance. */ pixel=GetPixelAlpha(source,p)-(double) GetPixelAlpha(destination,q); distance=pixel*pixel; if (distance > fuzz) return(MagickFalse); /* Generate a alpha scaling factor to generate a 4D cone on colorspace. Note that if one color is transparent, distance has no color component. */ if (source->alpha_trait != UndefinedPixelTrait) scale*=QuantumScale*GetPixelAlpha(source,p); if (destination->alpha_trait != UndefinedPixelTrait) scale*=QuantumScale*GetPixelAlpha(destination,q); if (scale <= MagickEpsilon) return(MagickTrue); } /* RGB or CMY color cube. 
*/ distance*=3.0; /* rescale appropriately */ fuzz*=3.0; pixel=GetPixelRed(source,p)-(double) GetPixelRed(destination,q); if (IsHueCompatibleColorspace(source->colorspace) != MagickFalse) { /* Compute an arc distance for hue. It should be a vector angle of 'S'/'W' length with 'L'/'B' forming appropriate cones. */ if (fabs((double) pixel) > (QuantumRange/2)) pixel-=QuantumRange; pixel*=2.0; } distance+=scale*pixel*pixel; if (distance > fuzz) return(MagickFalse); pixel=GetPixelGreen(source,p)-(double) GetPixelGreen(destination,q); distance+=scale*pixel*pixel; if (distance > fuzz) return(MagickFalse); pixel=GetPixelBlue(source,p)-(double) GetPixelBlue(destination,q); distance+=scale*pixel*pixel; if (distance > fuzz) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I s F u z z y E q u i v a l e n c e P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsFuzzyEquivalencePixelInfo() returns true if the distance between two % colors is less than the specified distance in a linear three (or four) % dimensional color space. % % This implements the equivalent of: % fuzz < sqrt(color_distance^2 * u.a*v.a + alpha_distance^2) % % Which produces a multi-dimensional cone for that colorspace along the % transparency vector. % % For example for an RGB: % color_distance^2 = ( (u.r-v.r)^2 + (u.g-v.g)^2 + (u.b-v.b)^2 ) / 3 % % See https://imagemagick.org/Usage/bugs/fuzz_distance/ % % Hue colorspace distances need more work. Hue is not a distance, it is an % angle! % % A check that q is in the same color space as p should be made and the % appropriate mapping made. -- Anthony Thyssen 8 December 2010 % % The format of the IsFuzzyEquivalencePixelInfo method is: % % MagickBooleanType IsFuzzyEquivalencePixelInfo(const PixelInfo *p, % const PixelInfo *q) % % A description of each parameter follows: % % o p: Pixel p. % % o q: Pixel q. 
% */ MagickExport MagickBooleanType IsFuzzyEquivalencePixelInfo(const PixelInfo *p, const PixelInfo *q) { double fuzz, pixel; double scale, distance; fuzz=(double) MagickMax(MagickMax(p->fuzz,q->fuzz),(MagickRealType) MagickSQ1_2); fuzz*=fuzz; scale=1.0; distance=0.0; if ((p->alpha_trait != UndefinedPixelTrait) || (q->alpha_trait != UndefinedPixelTrait)) { /* Transparencies are involved - set alpha distance. */ pixel=(p->alpha_trait != UndefinedPixelTrait ? p->alpha : OpaqueAlpha)- (q->alpha_trait != UndefinedPixelTrait ? q->alpha : OpaqueAlpha); distance=pixel*pixel; if (distance > fuzz) return(MagickFalse); /* Generate a alpha scaling factor to generate a 4D cone on colorspace. If one color is transparent, distance has no color component. */ if (p->alpha_trait != UndefinedPixelTrait) scale=(QuantumScale*p->alpha); if (q->alpha_trait != UndefinedPixelTrait) scale*=(QuantumScale*q->alpha); if (scale <= MagickEpsilon ) return(MagickTrue); } /* CMYK create a CMY cube with a multi-dimensional cone toward black. */ if (p->colorspace == CMYKColorspace) { pixel=p->black-q->black; distance+=pixel*pixel*scale; if (distance > fuzz) return(MagickFalse); scale*=(double) (QuantumScale*(QuantumRange-p->black)); scale*=(double) (QuantumScale*(QuantumRange-q->black)); } /* RGB or CMY color cube. */ distance*=3.0; /* rescale appropriately */ fuzz*=3.0; pixel=p->red-q->red; if (IsHueCompatibleColorspace(p->colorspace) != MagickFalse) { /* This calculates a arc distance for hue-- it should be a vector angle of 'S'/'W' length with 'L'/'B' forming appropriate cones. In other words this is a hack - Anthony. 
*/ if (fabs((double) pixel) > (QuantumRange/2)) pixel-=QuantumRange; pixel*=2.0; } distance+=pixel*pixel*scale; if (distance > fuzz) return(MagickFalse); pixel=p->green-q->green; distance+=pixel*pixel*scale; if (distance > fuzz) return(MagickFalse); pixel=p->blue-q->blue; distance+=pixel*pixel*scale; if (distance > fuzz) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C h a n n e l M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelChannelMask() sets the pixel channel map from the specified channel % mask. % % The format of the SetPixelChannelMask method is: % % ChannelType SetPixelChannelMask(Image *image, % const ChannelType channel_mask) % % A description of each parameter follows: % % o image: the image. % % o channel_mask: the channel mask. % */ static void LogPixelChannels(const Image *image) { ssize_t i; (void) LogMagickEvent(PixelEvent,GetMagickModule(),"%s[%08x]", image->filename,image->channel_mask); for (i=0; i < (ssize_t) image->number_channels; i++) { char channel_name[MagickPathExtent], traits[MagickPathExtent]; const char *name; PixelChannel channel; channel=GetPixelChannelChannel(image,i); switch (channel) { case RedPixelChannel: { name="red"; if (image->colorspace == CMYKColorspace) name="cyan"; if ((image->colorspace == LinearGRAYColorspace) || (image->colorspace == GRAYColorspace)) name="gray"; break; } case GreenPixelChannel: { name="green"; if (image->colorspace == CMYKColorspace) name="magenta"; break; } case BluePixelChannel: { name="blue"; if (image->colorspace == CMYKColorspace) name="yellow"; break; } case BlackPixelChannel: { name="black"; if (image->storage_class == PseudoClass) name="index"; break; } case IndexPixelChannel: { name="index"; break; } case AlphaPixelChannel: { name="alpha"; break; } case ReadMaskPixelChannel: { name="read-mask"; break; } case 
WriteMaskPixelChannel: { name="write-mask"; break; } case CompositeMaskPixelChannel: { name="composite-mask"; break; } case MetaPixelChannel: { name="meta"; break; } default: name="undefined"; } if (image->colorspace == UndefinedColorspace) { (void) FormatLocaleString(channel_name,MagickPathExtent,"%.20g", (double) channel); name=(const char *) channel_name; } *traits='\0'; if ((GetPixelChannelTraits(image,channel) & UpdatePixelTrait) != 0) (void) ConcatenateMagickString(traits,"update,",MagickPathExtent); if ((GetPixelChannelTraits(image,channel) & BlendPixelTrait) != 0) (void) ConcatenateMagickString(traits,"blend,",MagickPathExtent); if ((GetPixelChannelTraits(image,channel) & CopyPixelTrait) != 0) (void) ConcatenateMagickString(traits,"copy,",MagickPathExtent); if (*traits == '\0') (void) ConcatenateMagickString(traits,"undefined,",MagickPathExtent); traits[strlen(traits)-1]='\0'; (void) LogMagickEvent(PixelEvent,GetMagickModule()," %.20g: %s (%s)", (double) i,name,traits); } } MagickExport ChannelType SetPixelChannelMask(Image *image, const ChannelType channel_mask) { #define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01) ChannelType mask; ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(PixelEvent,GetMagickModule(),"%s[%08x]", image->filename,channel_mask); mask=image->channel_mask; image->channel_mask=channel_mask; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (GetChannelBit(channel_mask,channel) == 0) { SetPixelChannelTraits(image,channel,CopyPixelTrait); continue; } if (channel == AlphaPixelChannel) { if ((image->alpha_trait & CopyPixelTrait) != 0) { SetPixelChannelTraits(image,channel,CopyPixelTrait); continue; } SetPixelChannelTraits(image,channel,UpdatePixelTrait); continue; } if (image->alpha_trait != UndefinedPixelTrait) { SetPixelChannelTraits(image,channel,(const 
PixelTrait) (UpdatePixelTrait | BlendPixelTrait)); continue; } SetPixelChannelTraits(image,channel,UpdatePixelTrait); } if (image->storage_class == PseudoClass) SetPixelChannelTraits(image,IndexPixelChannel,CopyPixelTrait); if ((image->channels & ReadMaskChannel) != 0) SetPixelChannelTraits(image,ReadMaskPixelChannel,CopyPixelTrait); if ((image->channels & WriteMaskChannel) != 0) SetPixelChannelTraits(image,WriteMaskPixelChannel,CopyPixelTrait); if ((image->channels & CompositeMaskChannel) != 0) SetPixelChannelTraits(image,CompositeMaskPixelChannel,CopyPixelTrait); if (image->debug != MagickFalse) LogPixelChannels(image); return(mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l M e t a C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelMetaChannels() sets the image meta channels. % % The format of the SetPixelMetaChannels method is: % % MagickBooleanType SetPixelMetaChannels(Image *image, % const size_t number_meta_channels,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_meta_channels: the number of meta channels. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetPixelMetaChannels(Image *image, const size_t number_meta_channels,ExceptionInfo *exception) { image->number_meta_channels=number_meta_channels; InitializePixelChannelMap(image); return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o r t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SortImagePixels() sorts pixels within each scanline in ascending order of % intensity. 
%
%  The format of the SortImagePixels method is:
%
%      MagickBooleanType SortImagePixels(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SortImagePixels(Image *image,
  ExceptionInfo *exception)
{
#define SortImageTag  "Sort/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Sort image pixels: within each scanline, order pixels by ascending
    intensity.  Each row is independent, so rows are processed in parallel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Bubble sort: each outer pass floats the most intense remaining pixel
      to the right end of the unsorted prefix of the scanline.
    */
    for (x=0; x < (ssize_t) image->columns-1; x++)
    {
      MagickRealType
        current,
        previous;

      ssize_t
        j;

      previous=GetPixelIntensity(image,q);
      for (j=0; j < (ssize_t) (image->columns-x-1); j++)
      {
        current=GetPixelIntensity(image,q+(j+1)*GetPixelChannels(image));
        if (previous > current)
          {
            Quantum
              pixel[MaxPixelChannels];

            /*
              Swap adjacent pixels (all channels); 'previous' still refers to
              the larger value, now at position j+1.
            */
            (void) memcpy(pixel,q+j*GetPixelChannels(image),
              GetPixelChannels(image)*sizeof(Quantum));
            (void) memcpy(q+j*GetPixelChannels(image),q+(j+1)*
              GetPixelChannels(image),GetPixelChannels(image)*sizeof(Quantum));
            (void) memcpy(q+(j+1)*GetPixelChannels(image),pixel,
              GetPixelChannels(image)*sizeof(Quantum));
          }
        else
          previous=current;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Fixed copy-paste defect: the tag previously read
          "Solarize/Image" (copied from SolarizeImage), so progress
          callbacks misreported the operation being performed.
        */
        proceed=SetImageProgress(image,SortImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
GB_binop__minus_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_int16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__minus_int16) // A.*B function (eWiseMult): GB (_AemultB_03__minus_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int16) // A*D function (colscale): GB (_AxD__minus_int16) // D*A function (rowscale): GB (_DxB__minus_int16) // C+=B function (dense accum): GB (_Cdense_accumB__minus_int16) // C+=b function (dense accum): GB (_Cdense_accumb__minus_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int16) // C=scalar+B GB (_bind1st__minus_int16) // C=scalar+B' GB (_bind1st_tran__minus_int16) // C=A+scalar GB (_bind2nd__minus_int16) // C=A'+scalar GB (_bind2nd_tran__minus_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__minus_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, 
const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): these kernels follow SuiteSparse:GraphBLAS's generated-kernel
// layout for the binary operator z = (x - y) on int16_t ("minus_int16") --
// presumably produced by the GB binop factory; confirm against the generator.
// Each function is a thin shell around a shared algorithm template that is
// textually #included; GB_DISABLE compiles the kernel out entirely.

// Tail of a kernel (_AemultB_02) whose definition starts before this chunk;
// kept verbatim.
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,             // mask matrix (sparse or hypersparse)
    const bool Mask_struct,         // if true, use only the structure of M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,    // task slicing of the entries of M
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // algorithm body supplied by the shared template
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,         // if true, use only the structure of M
    const bool Mask_comp,           // if true the mask is complemented (C<!M>)
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // scalar bound to the first operand
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // optional bitmap of B -- assumed NULL means
                                // all entries present; TODO confirm GBB macro
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,     // scalar bound to the second operand
    const int8_t *restrict Ab,  // optional bitmap of A (see note above)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (x - aij) ;           \
}

GrB_Info GB (_bind1st_tran__minus_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE after the template (same definition here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (aij - y) ;           \
}

GrB_Info GB (_bind2nd_tran__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ========================= file: construction.h ========================= */
/* An Experimental Study on Hub Labeling based Shortest Path Algorithms [Experiments and Analyses] Authors: Ye Li, Leong Hou U, Man Lung Yiu, Ngai Meng Kou Contact: yb47438@umac.mo Affiliation: University of Macau The MIT License (MIT) Copyright (c) 2016 University of Macau Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#pragma once
#ifndef CONSTRUCTION_H
#define CONSTRUCTION_H

#include <queue>
#include <set>
#include "graph.h"
#include "paras.h"
#include "labels.h"
#include "ordering.h"
#include "heap.h"
#include "graph_search.h"
#include <omp.h>
#include <unordered_map>

// Global graph constants, aliased from SP_Constants (declared in paras.h).
#define numOfVertices SP_Constants::numOfVertices
#define numOfEdges SP_Constants::numOfEdges
#define INF_WEIGHT SP_Constants::INF_WEIGHT

// Base container holding every label variant built by the construction
// algorithms below, plus the vertex ordering used to build them.
class construction {
public:
	Label labels;       // undirected 2-hop labels
	DLabel dlabels;     // directed labels (forward index_ + backward bindex_)
	PLabel plabels;     // path labels (also store parent pointers, spt_p)
	DPLabel dplabels;   // directed path labels
	CLabel clabels;     // not used in the code visible in this chunk
	Ordering orders;    // NOTE(review): constructors below take a parameter
	                    // named `orders` that shadows this member; the member
	                    // is never initialized here -- confirm intent.
};

// PL: hub-label construction via pruned level-synchronous BFS.
// For each root r in rank order, a BFS is run that is cut off ("pruned")
// whenever an already-built hub pair certifies a distance no longer than the
// current BFS level -- the pruned-landmark-labeling scheme.
class PL : public construction {
public:
	vector<double> iteration_generated;  // per-root count of label entries generated
	vector<double> pruning_power;        // per-hub count of successful prunings

	// Build undirected 2-hop labels.
	// graph  : input graph in CSR form (graph.vertices / graph.edges)
	// orders : vertex ordering; roots are processed as ranks 0..n-1 and
	//          orders.inv maps rank -> original vertex id
	PL(Graph &graph, Ordering &orders) {
		iteration_generated.resize(numOfVertices);
		pruning_power.resize(numOfVertices);
		vector<index_t>& index_ = labels.index_;
		vector<NodeID> &inv = orders.inv;
		vector<NodeID> &rank = orders.rank;  // NOTE(review): unused in this ctor
		//vector<vector<NodeID> > &adj = graph.adj;
		vector<bool> usd(numOfVertices, false);  // usd[v]: v already processed as a root
		// tmp_idx[v] = label of v as parallel arrays (hub rank, distance),
		// always terminated by a (numOfVertices, INF_WEIGHT) sentinel pair.
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<EdgeWeight>(1, INF_WEIGHT)));
		vector<bool> vis(numOfVertices);    // BFS visited flags
		vector<NodeID> que(numOfVertices);  // flat-array BFS queue
		// dst_r[w] = distance stored for hub w in the current root's label;
		// INF_WEIGHT when absent (slot numOfVertices absorbs the sentinel).
		vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
		for (size_t r = 0; r < numOfVertices; ++r) {
			if (usd[r]) continue;
			// Load the root's current label into dst_r for O(1) lookups.
			const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
				dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
			}
			// Level-synchronous BFS: [que_t0, que_t1) is the current level,
			// d is its distance from r, que_h is the queue tail.
			NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
			que[que_h++] = r;
			vis[r] = true;
			que_t1 = que_h;
			for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) {
				for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
					NodeID v = que[que_i];
					pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
					index_t &idx_v = index_[inv[v]];  // NOTE(review): unused here
					if (usd[v]) continue;
					// Prune: if some common hub w already certifies
					// dist(r,v) <= d, no entry (r,d) is needed at v.
					for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
						NodeID w = tmp_idx_v.first[i];
						EdgeWeight td = tmp_idx_v.second[i] + dst_r[w];
						if (td <= d) {
							pruning_power[w]++;
							goto pruned;
						}
					}
// Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid){ NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } } PL(Graph &graph, Ordering &orders, bool D_FLAGS) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = dlabels.index_; vector<index_t>& bindex_ = dlabels.bindex_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /* vector<vector<NodeID> > &adj = graph.adj; vector<vector<NodeID> > &r_adj = graph.r_adj;*/ // Array Representation vector<EdgeID>& vertices = graph.vertices; vector<EdgeID>& r_vertices = graph.r_vertices; vector<NodeID>& edges = graph.edges; vector<NodeID>& r_edges = graph.r_edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); // Backward labels. 
vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // Forward labels of root. vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward labels of root. for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; // Forward search. // Initialize forward labels of r. const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v. for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ // Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid){ NodeID w = edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_forward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; // Backward search. // Initialize backward labels of r. 
const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path). for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i];*/ // Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_backward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); 
bindex_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); } } void TD_BP_UNDIRECTED(Graph& graph, Ordering &orderes, int kNumBitParallelRoots, bool directed = false, bool bp = true) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); 
++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } } void TP_path(Graph &graph, Ordering &orders) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t_path>& index_ = plabels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<NodeID> parents(numOfVertices, numOfVertices); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<NodeID> > > tmp_idx_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; parents[r] = 
inv[r]; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &tmp_idx_parent_v = tmp_idx_parents[v]; index_t_path &idx_v = index_[inv[v]]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_parent_v.first.back() = r; tmp_idx_parent_v.second.back() = parents[v]; tmp_idx_parent_v.first.push_back(numOfVertices); tmp_idx_parent_v.second.push_back(numOfVertices); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { NodeID w = graph.edges[eid]; if (!vis[w]) { parents[w] = inv[v]; que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) { vis[que[i]] = false; parents[que[i]] = numOfVertices; } for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_p[i] = tmp_idx_parents[v].second[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); tmp_idx_parents[v].first.clear(); tmp_idx_parents[v].second.clear(); 
tmp_idx_parents[v].first.shrink_to_fit(); tmp_idx_parents[v].second.shrink_to_fit(); } } void TP_path_d(Graph &graph, Ordering &orders) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t_path>& index_ = dplabels.index_; vector<index_t_path>& bindex_ = dplabels.bindex_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /* vector<vector<NodeID> > &adj = graph.adj; vector<vector<NodeID> > &r_adj = graph.r_adj;*/ // Array Representation vector<EdgeID>& vertices = graph.vertices; vector<EdgeID>& r_vertices = graph.r_vertices; vector<NodeID>& edges = graph.edges; vector<NodeID>& r_edges = graph.r_edges; vector<bool> usd(numOfVertices, false); vector<NodeID> parents(numOfVertices, numOfVertices); vector<NodeID> r_parents(numOfVertices, numOfVertices); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_parent(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<NodeID> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); // Backward labels. vector<pair<vector<NodeID>, vector<NodeID> > > r_tmp_idx_parent(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); // Backward labels. vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // Forward labels of root. vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward labels of root. for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; // Forward search. // Initialize forward labels of r. 
const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; parents[r] = inv[r]; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &r_tmp_idx_parent_v = r_tmp_idx_parent[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v. for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); r_tmp_idx_parent_v.first.back() = r; r_tmp_idx_parent_v.second.back() = parents[v]; r_tmp_idx_parent_v.first.push_back(numOfVertices); r_tmp_idx_parent_v.second.push_back(numOfVertices); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ // Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid]; if (!vis[w]) { parents[w] = inv[v]; que[que_h++] = w; vis[w] = true; } } pruned_forward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) { vis[que[i]] = false; parents[que[i]] = numOfVertices; } for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; parents[r] = inv[r]; // Backward search. // Initialize backward labels of r. 
const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &tmp_idx_parent_v = tmp_idx_parent[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path). for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i];*/ // Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid]; if (!vis[w]) { parents[w] = inv[v]; que[que_h++] = w; vis[w] = true; } } pruned_backward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) { vis[que[i]] = false; parents[que[i]] = numOfVertices; } for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_p[i] = tmp_idx_parent[v].second[i]; for (NodeID i = 0; i < k; ++i) 
index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx_parent[v].first.clear(); tmp_idx_parent[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); tmp_idx_parent[v].first.shrink_to_fit(); tmp_idx_parent[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); bindex_[inv[v]].spt_d.resize(k); bindex_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_p[i] = r_tmp_idx_parent[v].second[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx_parent[v].first.clear(); r_tmp_idx_parent[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); r_tmp_idx_parent[v].first.shrink_to_fit(); r_tmp_idx_parent[v].second.shrink_to_fit(); } } PL(Graph &graph, Ordering &orders, bool path_flags, bool directed_flags) { if (path_flags == true) { if (directed_flags == true) { TP_path_d(graph, orders); } else { TP_path(graph, orders); } } } /* PL(Graph &graph, Ordering &orders, bool path_flags, bool directed_flags, bool bp_flags, int kNumBitParallelRoots) { if (path_flags == true) { if (directed_flags == true) { TP_path_d(graph, orders); } else { TP_path(graph, orders); } } else { if (directed_flags == false) { TP_BP(graph, orders, kNumBitParallelRoots); } } } */ }; template<int kNumBitParallelRoots = 50> class BPL { //typedef BPLabel<kNumBitParallelRoots>::index_t_bp index_t_bp; public: BPLabel<kNumBitParallelRoots> bplabels; DBPLabel<kNumBitParallelRoots> dbplabels; BPL(Graph &graph, Ordering &orders) { //bplabels = BPLabel(kNumBitParallelRoots); //kNumBitParallelRoots = 64; //bplabels.setParas(kNumBitParallelRoots); // iteration_generated.resize(numOfVertices); // pruning_power.resize(numOfVertices); 
index_t_bp<kNumBitParallelRoots>*& index_ = bplabels.index_bp; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); { vector<EdgeWeight> tmp_d(numOfVertices); vector<std::pair<uint64_t, uint64_t> > tmp_s(numOfVertices); vector<NodeID> que(numOfVertices); vector<std::pair<NodeID, NodeID> > sibling_es(numOfEdges); vector<std::pair<NodeID, NodeID> > child_es(numOfEdges); cout << "Building BP labels" << endl; index_ = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>)); //for (NodeID v = 0; v < numOfVertices; ++v) { // index_t_bp &idx = index_[v]; // idx.bpspt_d = (EdgeWeight*)memalign(64, kNumBitParallelRoots * sizeof(EdgeWeight)); // idx.bpspt_s = (uint64_t*)memalign(64, kNumBitParallelRoots * 2 * sizeof(uint64_t)); // /*for (int i = 0; i < kNumBitParallelRoots; ++i) { // idx.bpspt_s[i] = (uint64_t*)memalign(64, 2 * sizeof(uint64_t)); // }*/ //} int r = 0; for (int i_bpspt = 0; i_bpspt < kNumBitParallelRoots; ++i_bpspt) { while (r < numOfVertices && usd[r]) ++r; if (r == numOfVertices) { for (NodeID v = 0; v < numOfVertices; ++v) index_[v].bpspt_d[i_bpspt] = INF_WEIGHT; continue; } usd[r] = true; fill(tmp_d.begin(), tmp_d.end(), INF_WEIGHT); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); int que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; vector<int> vs; vector<NodeID> adj_r(graph.vertices[r + 1] - graph.vertices[r]); for (EdgeID eid = graph.vertices[r]; eid < graph.vertices[r + 1]; eid++) { adj_r[eid - graph.vertices[r]] = graph.edges[eid]; } sort(adj_r.begin(), adj_r.end()); for 
(size_t i = 0; i < adj_r.size(); ++i) { NodeID v = adj_r[i]; if (!usd[v]) { usd[v] = true; que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; vs.push_back(v); if (++ns == 64) break; } } for (EdgeWeight d = 0; que_t0 < que_h; ++d) { int num_sibling_es = 0, num_child_es = 0; for (int que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; eid++) { NodeID tv = graph.edges[eid]; EdgeWeight td = d + 1; if (d > tmp_d[tv]); else if (d == tmp_d[tv]) { if (v < tv) { sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; } } else { if (tmp_d[tv] == INF_WEIGHT) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; } } } for (int i = 0; i < num_sibling_es; ++i) { int v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (int i = 0; i < num_child_es; ++i) { int v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (NodeID v = 0; v < numOfVertices; ++v) { index_[inv[v]].bpspt_d[i_bpspt] = tmp_d[v]; index_[inv[v]].bpspt_s[i_bpspt][0] = tmp_s[v].first; index_[inv[v]].bpspt_s[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; } } } cout << "Building normal labels" << endl; for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; index_t_bp<kNumBitParallelRoots> &idx_r = index_[inv[r]]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, 
vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; index_t_bp<kNumBitParallelRoots> &idx_v = index_[inv[v]]; // Prefetch _mm_prefetch(&idx_v.bpspt_d[0], _MM_HINT_T0); _mm_prefetch(&idx_v.bpspt_s[0][0], _MM_HINT_T0); _mm_prefetch(&tmp_idx_v.first[0], _MM_HINT_T0); _mm_prefetch(&tmp_idx_v.second[0], _MM_HINT_T0); if (usd[v]) continue; for (int i = 0; i < kNumBitParallelRoots; ++i) { EdgeWeight td = idx_r.bpspt_d[i] + idx_v.bpspt_d[i]; if (td - 2 <= d) { /*td += (idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 : ((idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) | (idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0])) ? -1 : 0;*/ td += (idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 : ((idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) | (idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0])) ? -1 : 0; if (td <= d) goto pruned; } } for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { //pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); //iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); //index_[inv[v]].spt_v.resize(k); //index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_v = (NodeID*)memalign(64, k * sizeof(NodeID)); index_[inv[v]].spt_d = (EdgeWeight*)memalign(64, k * sizeof(EdgeWeight)); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < 
k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } } BPL(Graph &graph, Ordering &orders, bool directed_flags) { //bplabels = BPLabel(kNumBitParallelRoots); //kNumBitParallelRoots = 64; //bplabels.setParas(kNumBitParallelRoots); // iteration_generated.resize(numOfVertices); // pruning_power.resize(numOfVertices); if (directed_flags == false) return; index_t_bp<kNumBitParallelRoots>*& index_ = dbplabels.index_bp; index_t_bp<kNumBitParallelRoots>*& bindex_ = dbplabels.bindex_bp; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<bool> r_usd(numOfVertices, false); // vector<bool> bp_usd(numOfVertices, false); // vector<bool> r_bp_usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward labels of root. 
{
			// ---- Phase 1: bit-parallel (BP) shortcut trees ----
			// For each of kNumBitParallelRoots roots r, run one BFS forward and
			// one backward, tracking for up to 64 selected neighbours of r a
			// bitmask pair (S-1, S0): sptc sets for distance-(d-1) / distance-d
			// ancestry, enabling O(1) upper-bound checks at query/pruning time.
			vector<EdgeWeight> tmp_d(numOfVertices);
			vector<std::pair<uint64_t, uint64_t> > tmp_s(numOfVertices);
			vector<EdgeWeight> r_tmp_d(numOfVertices);
			vector<std::pair<uint64_t, uint64_t> > r_tmp_s(numOfVertices);
			vector<NodeID> que(numOfVertices);
			// Edge buffers reused per BFS level (sized for the whole edge set).
			vector<std::pair<NodeID, NodeID> > sibling_es(numOfEdges);
			vector<std::pair<NodeID, NodeID> > child_es(numOfEdges);
			vector<std::pair<NodeID, NodeID> > r_sibling_es(numOfEdges);
			vector<std::pair<NodeID, NodeID> > r_child_es(numOfEdges);
			cout << "Building BP labels" << endl;
			// NOTE(review): memalign results are not checked for NULL here.
			index_ = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
			bindex_ = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
			//for (NodeID v = 0; v < numOfVertices; ++v) {
			//	index_t_bp &idx = index_[v];
			//	idx.bpspt_d = (EdgeWeight*)memalign(64, kNumBitParallelRoots * sizeof(EdgeWeight));
			//	idx.bpspt_s = (uint64_t*)memalign(64, kNumBitParallelRoots * 2 * sizeof(uint64_t));
			//	/*for (int i = 0; i < kNumBitParallelRoots; ++i) {
			//		idx.bpspt_s[i] = (uint64_t*)memalign(64, 2 * sizeof(uint64_t));
			//	}*/
			//}
			int r = 0;
			for (int i_bpspt = 0; i_bpspt < kNumBitParallelRoots; ++i_bpspt) {
				// Next unused root in rank order; pad remaining BP slots with
				// INF if we run out of vertices.
				while (r < numOfVertices && usd[r] ) ++r;
				if (r == numOfVertices) {
					for (NodeID v = 0; v < numOfVertices; ++v) {
						index_[v].bpspt_d[i_bpspt] = INF_WEIGHT;
						bindex_[v].bpspt_d[i_bpspt] = INF_WEIGHT;
					}
					continue;
				}
				r_usd[r] = true;
				//r_bp_usd[r] = true;
				//forward search
				fill(r_tmp_d.begin(), r_tmp_d.end(), INF_WEIGHT);
				fill(r_tmp_s.begin(), r_tmp_s.end(), std::make_pair(0, 0));
				int que_t0 = 0, que_t1 = 0, que_h = 0;
				que[que_h++] = r;
				r_tmp_d[r] = 0;
				que_t1 = que_h;
				int ns = 0;
				vector<int> vs;
				// Collect and sort r's out- and in-neighbours; only vertices in
				// BOTH lists (bidirectional neighbours) may carry a BP bit.
				vector<NodeID> adj_r(graph.vertices[r + 1] - graph.vertices[r]);
				for (EdgeID eid = graph.vertices[r]; eid < graph.vertices[r + 1]; eid++) {
					adj_r[eid - graph.vertices[r]] = graph.edges[eid];
				}
				sort(adj_r.begin(), adj_r.end());
				vector<NodeID> r_adj_r(graph.r_vertices[r + 1] - graph.r_vertices[r]);
				for (EdgeID eid = graph.r_vertices[r]; eid < graph.r_vertices[r + 1]; eid++) {
					r_adj_r[eid - graph.r_vertices[r]] = graph.r_edges[eid];
				}
				sort(r_adj_r.begin(), r_adj_r.end());
				vector<NodeID> common_adj;
				set_intersection(adj_r.begin(), adj_r.end(), r_adj_r.begin(), r_adj_r.end(), back_inserter(common_adj));
				sort(common_adj.begin(), common_adj.end());
				// Seed up to 64 bit-slots with unused bidirectional neighbours.
				for (size_t i = 0; i < common_adj.size(); ++i) {
					NodeID v = common_adj[i];
					if (!r_usd[v]) {
						r_usd[v] = true;
						que[que_h++] = v;
						r_tmp_d[v] = 1;
						r_tmp_s[v].first = 1ULL << ns;
						vs.push_back(v);
						if (++ns == 64) break;
					}
				}
				// Level-synchronous BFS over OUT-edges; sibling edges merge
				// same-level bitmasks, child edges propagate them downward.
				for (EdgeWeight d = 0; que_t0 < que_h; ++d) {
					int num_sibling_es = 0, num_child_es = 0;
					for (int que_i = que_t0; que_i < que_t1; ++que_i) {
						NodeID v = que[que_i];
						for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; eid++) {
							NodeID tv = graph.edges[eid];
							EdgeWeight td = d + 1;
							if (d > r_tmp_d[tv]); // edge goes up a level: nothing to do
							else if (d == r_tmp_d[tv]) {
								// if (v < tv) {
								r_sibling_es[num_sibling_es].first = v;
								r_sibling_es[num_sibling_es].second = tv;
								++num_sibling_es;
								//}
							}
							else {
								if (r_tmp_d[tv] == INF_WEIGHT) {
									que[que_h++] = tv;
									r_tmp_d[tv] = td;
								}
								r_child_es[num_child_es].first = v;
								r_child_es[num_child_es].second = tv;
								++num_child_es;
							}
						}
					}
					for (int i = 0; i < num_sibling_es; ++i) {
						int v = r_sibling_es[i].first, w = r_sibling_es[i].second;
						//r_tmp_s[v].second |= r_tmp_s[w].first;
						// Directed: only propagate v's level-set into w's "+1" set.
						r_tmp_s[w].second |= r_tmp_s[v].first;
					}
					for (int i = 0; i < num_child_es; ++i) {
						int v = r_child_es[i].first, c = r_child_es[i].second;
						r_tmp_s[c].first |= r_tmp_s[v].first;
						r_tmp_s[c].second |= r_tmp_s[v].second;
					}
					que_t0 = que_t1;
					que_t1 = que_h;
				}
				// Forward BFS results become BACKWARD labels (distances r->v),
				// stored under original ids; mask bit i out of S+1 if already in S.
				for (NodeID v = 0; v < numOfVertices; ++v) {
					bindex_[inv[v]].bpspt_d[i_bpspt] = r_tmp_d[v];
					bindex_[inv[v]].bpspt_s[i_bpspt][0] = r_tmp_s[v].first;
					bindex_[inv[v]].bpspt_s[i_bpspt][1] = r_tmp_s[v].second & ~r_tmp_s[v].first;
				}
				//forward search end
				//backward
				usd[r] = true;
				fill(tmp_d.begin(), tmp_d.end(), INF_WEIGHT);
				fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
				que_t0 = 0, que_t1 = 0, que_h = 0;
				que[que_h++] = r;
				tmp_d[r] = 0;
				que_t1 = que_h;
				ns = 0;
				vs.clear();
				for (size_t i = 0; i < common_adj.size(); ++i) {
					NodeID v = common_adj[i];
					if (!usd[v]) {
						usd[v] = true;
						que[que_h++] = v;
						tmp_d[v] = 1;
						tmp_s[v].first = 1ULL << ns;
						vs.push_back(v);
						if (++ns == 64) break;
					}
				}
				// Same level-synchronous BFS, now over IN-edges (r_vertices).
				for (EdgeWeight d = 0; que_t0 < que_h; ++d) {
					int num_sibling_es = 0, num_child_es = 0;
					for (int que_i = que_t0; que_i < que_t1; ++que_i) {
						NodeID v = que[que_i];
						for (EdgeID eid = graph.r_vertices[v]; eid < graph.r_vertices[v + 1]; eid++) {
							NodeID tv = graph.r_edges[eid];
							EdgeWeight td = d + 1;
							if (d > tmp_d[tv]);
							else if (d == tmp_d[tv]) {
								//if (v < tv) {
								sibling_es[num_sibling_es].first = v;
								sibling_es[num_sibling_es].second = tv;
								++num_sibling_es;
								//}
							}
							else {
								if (tmp_d[tv] == INF_WEIGHT) {
									que[que_h++] = tv;
									tmp_d[tv] = td;
								}
								child_es[num_child_es].first = v;
								child_es[num_child_es].second = tv;
								++num_child_es;
							}
						}
					}
					for (int i = 0; i < num_sibling_es; ++i) {
						int v = sibling_es[i].first, w = sibling_es[i].second;
						//tmp_s[v].second |= tmp_s[w].first;
						tmp_s[w].second |= tmp_s[v].first;
					}
					for (int i = 0; i < num_child_es; ++i) {
						int v = child_es[i].first, c = child_es[i].second;
						tmp_s[c].first |= tmp_s[v].first;
						tmp_s[c].second |= tmp_s[v].second;
					}
					que_t0 = que_t1;
					que_t1 = que_h;
				}
				// Backward BFS results become FORWARD labels (distances v->r).
				for (NodeID v = 0; v < numOfVertices; ++v) {
					index_[inv[v]].bpspt_d[i_bpspt] = tmp_d[v];
					index_[inv[v]].bpspt_s[i_bpspt][0] = tmp_s[v].first;
					index_[inv[v]].bpspt_s[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first;
				}
			}
		}
		cout << "Building normal labels" << endl;
		// ---- Phase 2: pruned BFS labels (undirected reference version kept
		// below, commented out; the live directed version follows). ----
		//for (size_t r = 0; r < numOfVertices; ++r) {
		//	if (usd[r]) continue;
		//	const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
		//	index_t_bp<kNumBitParallelRoots> &idx_r = index_[inv[r]];
		//	for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
		//		dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
		//	}
		//	NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
		//	que[que_h++] = r;
		//	vis[r] = true;
		//	que_t1 = que_h;
		//	for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) {
		//		for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
		//			NodeID v = que[que_i];
		//			pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
		//			index_t_bp<kNumBitParallelRoots> &idx_v = index_[inv[v]];
		//			// Prefetch
		//			_mm_prefetch(&idx_v.bpspt_d[0], _MM_HINT_T0);
		//			_mm_prefetch(&idx_v.bpspt_s[0][0], _MM_HINT_T0);
		//			_mm_prefetch(&tmp_idx_v.first[0], _MM_HINT_T0);
		//			_mm_prefetch(&tmp_idx_v.second[0], _MM_HINT_T0);
		//			if (usd[v]) continue;
		//			for (int i = 0; i < kNumBitParallelRoots; ++i) {
		//				EdgeWeight td = idx_r.bpspt_d[i] + idx_v.bpspt_d[i];
		//				if (td - 2 <= d) {
		//					td +=
		//						(idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 :
		//						((idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) |
		//							(idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0]))
		//						? -1 : 0;
		//					if (td <= d) goto pruned;
		//				}
		//			}
		//			for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
		//				NodeID w = tmp_idx_v.first[i];
		//				EdgeWeight td = tmp_idx_v.second[i] + dst_r[w];
		//				if (td <= d) {
		//					//pruning_power[w]++;
		//					goto pruned;
		//				}
		//			}
		//			// Traverse
		//			tmp_idx_v.first.back() = r;
		//			tmp_idx_v.second.back() = d;
		//			tmp_idx_v.first.push_back(numOfVertices);
		//			tmp_idx_v.second.push_back(INF_WEIGHT);
		//			//iteration_generated[r]++;
		//			/*for (size_t i = 0; i < adj[v].size(); ++i) {
		//			NodeID w = adj[v][i];*/
		//			for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) {
		//				NodeID w = graph.edges[eid];
		//				if (!vis[w]) {
		//					que[que_h++] = w;
		//					vis[w] = true;
		//				}
		//			}
		//		pruned:
		//			{}
		//		}
		//		que_t0 = que_t1;
		//		que_t1 = que_h;
		//	}
		//	for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false;
		//	for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
		//		dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
		//	usd[r] = true;
		//}
		// Directed version: for each root r in rank order, one pruned forward
		// BFS (extends backward labels of reached vertices) and one pruned
		// backward BFS (extends forward labels).
		for (size_t r = 0; r < numOfVertices; ++r) {
			if (usd[r]) continue;
			// Forward search.
			// Initialize forward labels of r.
const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
			index_t_bp<kNumBitParallelRoots> &idx_r = index_[inv[r]];
			// Scatter r's current forward label into dst_r for O(1) lookups.
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
				dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
			}
			NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
			que[que_h++] = r;
			vis[r] = true;
			que_t1 = que_h;
			// Level-synchronous pruned BFS over OUT-edges from r.
			for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) {
				for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
					NodeID v = que[que_i];
					pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v];
					index_t_bp<kNumBitParallelRoots> &r_idx_v = bindex_[inv[v]];
					//index_t &idx_v = index_[inv[v]];
					// Prefetch
					_mm_prefetch(&r_idx_v.bpspt_d[0], _MM_HINT_T0);
					_mm_prefetch(&r_idx_v.bpspt_s[0][0], _MM_HINT_T0);
					_mm_prefetch(&r_tmp_idx_v.first[0], _MM_HINT_T0);
					_mm_prefetch(&r_tmp_idx_v.second[0], _MM_HINT_T0);
					if (usd[v]) continue;
					// BP pruning: if some BP root already certifies a path
					// r -> v of length <= d, skip v (bitmask overlap gives the
					// -2 / -1 distance correction).
					for (int i = 0; i < kNumBitParallelRoots; ++i) {
						EdgeWeight td = idx_r.bpspt_d[i] + r_idx_v.bpspt_d[i];
						if (td - 2 <= d) {
							td +=
								(idx_r.bpspt_s[i][0] & r_idx_v.bpspt_s[i][0]) ? -2 :
								((idx_r.bpspt_s[i][0] & r_idx_v.bpspt_s[i][1]) |
									(idx_r.bpspt_s[i][1] & r_idx_v.bpspt_s[i][0]))
								? -1 : 0;
							if (td <= d) goto pruned_forward;
						}
					}
					// Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v.
					for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) {
						NodeID w = r_tmp_idx_v.first[i];
						EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w];
						if (td <= d) {
							goto pruned_forward;
						}
					}
					// Traverse: record (r, d) in v's backward label, keeping the
					// trailing (numOfVertices, INF_WEIGHT) sentinel.
					r_tmp_idx_v.first.back() = r;
					r_tmp_idx_v.second.back() = d;
					r_tmp_idx_v.first.push_back(numOfVertices);
					r_tmp_idx_v.second.push_back(INF_WEIGHT);
					/*for (size_t i = 0; i < adj[v].size(); ++i) {
					NodeID w = adj[v][i];*/
					// Array Representation
					for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) {
						NodeID w = graph.edges[eid];
						if (!vis[w]) {
							que[que_h++] = w;
							vis[w] = true;
						}
					}
				pruned_forward:
					{}
				}
				que_t0 = que_t1;
				que_t1 = que_h;
			}
			// Reset per-root scratch state.
			for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false;
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
				dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
			// Backward search.
			// Initialize backward labels of r.
			const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r];
			index_t_bp<kNumBitParallelRoots> &r_idx_r = bindex_[inv[r]];
			for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) {
				r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i];
			}
			que_t0 = 0, que_t1 = 0, que_h = 0;
			que[que_h++] = r;
			vis[r] = true;
			que_t1 = que_h;
			// Mirror of the forward pass: pruned BFS over IN-edges (v -> r paths),
			// extending the FORWARD labels of reached vertices.
			for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) {
				for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
					NodeID v = que[que_i];
					pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
					index_t_bp<kNumBitParallelRoots> &idx_v = index_[inv[v]];
					//index_t &idx_v = index_[inv[v]];
					_mm_prefetch(&idx_v.bpspt_d[0], _MM_HINT_T0);
					_mm_prefetch(&idx_v.bpspt_s[0][0], _MM_HINT_T0);
					_mm_prefetch(&tmp_idx_v.first[0], _MM_HINT_T0);
					_mm_prefetch(&tmp_idx_v.second[0], _MM_HINT_T0);
					if (usd[v]) continue;
					for (int i = 0; i < kNumBitParallelRoots; ++i) {
						EdgeWeight td = r_idx_r.bpspt_d[i] + idx_v.bpspt_d[i];
						if (td - 2 <= d) {
							/*td +=
								(r_idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 :
								((r_idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) |
									(r_idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0]))
								? -1 : 0;*/
							// Operand order swapped vs. the forward pass: v's
							// forward BP sets against r's backward BP sets.
							td += ( idx_v.bpspt_s[i][0]) & r_idx_r.bpspt_s[i][0] ? -2 :
								((idx_v.bpspt_s[i][1] & r_idx_r.bpspt_s[i][0]) |
								(idx_v.bpspt_s[i][0] & r_idx_r.bpspt_s[i][1]))
								? -1 : 0;
							if (td <= d) goto pruned_backward;
						}
					}
					// Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path).
					for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
						NodeID w = tmp_idx_v.first[i];
						EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w];
						if (td <= d) {
							goto pruned_backward;
						}
					}
					// Traverse
					tmp_idx_v.first.back() = r;
					tmp_idx_v.second.back() = d;
					tmp_idx_v.first.push_back(numOfVertices);
					tmp_idx_v.second.push_back(INF_WEIGHT);
					/*for (size_t i = 0; i < r_adj[v].size(); ++i) {
					NodeID w = r_adj[v][i];*/
					// Array Representation
					for (EdgeID eid = graph.r_vertices[v]; eid < graph.r_vertices[v + 1]; ++eid) {
						NodeID w = graph.r_edges[eid];
						if (!vis[w]) {
							que[que_h++] = w;
							vis[w] = true;
						}
					}
				pruned_backward:
					{}
				}
				que_t0 = que_t1;
				que_t1 = que_h;
			}
			for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false;
			for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i)
				r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT;
			usd[r] = true;
		}
		// ---- Phase 3: freeze the temporary label vectors into the compact
		// memalign'd arrays (spt_v / spt_d), then release the temporaries. ----
		for (size_t v = 0; v < numOfVertices; ++v) {
			NodeID k = tmp_idx[v].first.size();
			//index_[inv[v]].spt_v.resize(k);
			//index_[inv[v]].spt_d.resize(k);
			index_[inv[v]].spt_v = (NodeID*)memalign(64, k * sizeof(NodeID));
			index_[inv[v]].spt_d = (EdgeWeight*)memalign(64, k * sizeof(EdgeWeight));
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
			tmp_idx[v].first.clear();
			tmp_idx[v].second.clear();
			tmp_idx[v].first.shrink_to_fit();
			tmp_idx[v].second.shrink_to_fit();
			k = r_tmp_idx[v].first.size();
			//index_[inv[v]].spt_v.resize(k);
			//index_[inv[v]].spt_d.resize(k);
			bindex_[inv[v]].spt_v = (NodeID*)memalign(64, k * sizeof(NodeID));
			bindex_[inv[v]].spt_d = (EdgeWeight*)memalign(64, k * sizeof(EdgeWeight));
			for (NodeID i = 0; i < k; ++i)
bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i];
			r_tmp_idx[v].first.clear();
			r_tmp_idx[v].second.clear();
			r_tmp_idx[v].first.shrink_to_fit();
			r_tmp_idx[v].second.shrink_to_fit();
		}
	}
};

// Pruned label construction for WEIGHTED graphs: each constructor overload runs
// pruned Dijkstra searches from every vertex in rank order and materialises
// 2-hop labels.  Overloads (distinguished by trailing bool tags) cover the
// undirected, path-recording, directed, and directed path-recording cases.
class PL_W : public construction {
public:
	// Per-root diagnostics: labels generated per root / prunes per hub vertex.
	vector<double> iteration_generated;
	vector<double> pruning_power;

	// Undirected weighted labeling into `labels`.
	PL_W(WGraph &wgraph, Ordering &orders) {
		iteration_generated.resize(numOfVertices);
		pruning_power.resize(numOfVertices);
		vector<index_t>& index_ = labels.index_;
		vector<NodeID> &inv = orders.inv;
		vector<NodeID> &rank = orders.rank;
		// vector<vector<NodeID> > &adj = wgraph.adj;
		// vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;
		vector<EdgeID>& vertices = wgraph.vertices;
		vector<NodeEdgeWeightPair>& edges = wgraph.edges;
		vector<bool> usd(numOfVertices, false);
		// Labels under construction; one (numOfVertices, INF_WEIGHT) sentinel each.
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<EdgeWeight>(1, INF_WEIGHT)));
		vector<bool> vis(numOfVertices);
		vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
		queue<NodeID> visited_que;
		vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
		benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
		// pop / hsize track heap statistics (reported only via the commented couts).
		long pop = 0;
		double hsize = 0;
		for (size_t r = 0; r < numOfVertices; ++r) {
			if (usd[r]) continue;
			const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
			// Scatter r's label for O(1) distance lookups during pruning.
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
				dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
			}
			pqueue.update(r, 0);
			//vis[r] = true;
			long max_heap_size = 0;
			long heap_size = 1;
			// Pruned Dijkstra from r.
			while (!pqueue.empty()) {
				pop++;
				heap_size--;
				NodeID v;
				EdgeWeight v_d;
				pqueue.extract_min(v, v_d);
				pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
				vis[v] = true;
				visited_que.push(v);
				if (usd[v]) continue;
				// Prune if an existing hub already covers r..v within v_d.
				for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
					NodeID w = tmp_idx_v.first[i];
					EdgeWeight td = tmp_idx_v.second[i] + dst_r[w];
					if (td <= v_d) {
						pruning_power[w]++;
						goto pruned;
					}
				}
				// Traverse: append (r, v_d) to v's label, keep trailing sentinel.
				tmp_idx_v.first.back() = r;
				tmp_idx_v.second.back() = v_d;
				tmp_idx_v.first.push_back(numOfVertices);
				tmp_idx_v.second.push_back(INF_WEIGHT);
				iteration_generated[r]++;
				// for (size_t i = 0; i < adj[v].size(); ++i) {
				// NodeID w = adj[v][i];
				// EdgeWeight w_d = adj_weight[v][i] + v_d;
				for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) {
					NodeID w = edges[eid].first;
					EdgeWeight w_d = edges[eid].second + v_d;
					if (!vis[w]) {
						if (distances[w] == INF_WEIGHT) { // first time w enters the heap
							heap_size++;
							if (max_heap_size < heap_size) max_heap_size = heap_size;
						}
						if( distances[w] > w_d ){
							pqueue.update(w, w_d);
							distances[w] = w_d;
						}
					}
				}
			pruned:
				{}
			}
			hsize = hsize + max_heap_size;
			// Reset per-root scratch state.
			while (!visited_que.empty()) {
				NodeID vis_v = visited_que.front();
				visited_que.pop();
				vis[vis_v] = false;
				distances[vis_v] = INF_WEIGHT;
				pqueue.clear(vis_v);
			}
			pqueue.clear_n();
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
				dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
			usd[r] = true;
		}
		//cout << "total pop:" << pop << endl;
		//cout << "heap size:" << (double)hsize / (double)numOfVertices << endl;
		// Materialise labels (vector-based index here, unlike BPL's raw arrays).
		double count = 0;
		for (size_t v = 0; v < numOfVertices; ++v) {
			NodeID k = tmp_idx[v].first.size();
			count = count + k - 1; // exclude the sentinel from the size statistic
			index_[inv[v]].spt_v.resize(k);
			index_[inv[v]].spt_d.resize(k);
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
			tmp_idx[v].first.clear();
			tmp_idx[v].second.clear();
			tmp_idx[v].first.shrink_to_fit();
			tmp_idx[v].second.shrink_to_fit();
		}
		cout << "Average Label Size:" << count / numOfVertices << endl;
	}

	// Undirected weighted labeling WITH parent pointers (path labels) into
	// `plabels`, enabling path reconstruction at query time.
	PL_W(WGraph &wgraph, Ordering &orders, bool DIRECTED, bool PATH_QUERY) {
		// Generating Path Labels
		iteration_generated.resize(numOfVertices);
		pruning_power.resize(numOfVertices);
		vector<index_t_path>& index_ = plabels.index_;
		vector<NodeID> &inv = orders.inv;
		vector<NodeID> &rank = orders.rank;
		// vector<vector<NodeID> > &adj = wgraph.adj;
		// vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;
		vector<EdgeID>& vertices = wgraph.vertices;
		vector<NodeEdgeWeightPair>& edges = wgraph.edges;
		vector<bool> usd(numOfVertices, false);
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<EdgeWeight>(1, INF_WEIGHT)));
		// Parallel structure holding, per label entry, the predecessor of v on
		// the shortest path from the root (stored as original id).
		vector<pair<vector<NodeID>, vector<NodeID> > > tmp_idx_parent(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<NodeID>(1, numOfVertices)));
		vector<bool> vis(numOfVertices);
		vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
		queue<NodeID> visited_que;
		vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
		vector<NodeID> parents(numOfVertices, numOfVertices);
		benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
		for (size_t r = 0; r < numOfVertices; ++r) {
			if (usd[r]) continue;
			const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
				dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
			}
			parents[r] = inv[r]; // root is its own parent
			pqueue.update(r, 0);
			//vis[r] = true;
			while (!pqueue.empty()) {
				NodeID v;
				EdgeWeight v_d;
				pqueue.extract_min(v, v_d);
				pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
				pair<vector<NodeID>, vector<NodeID> > &tmp_idx_parent_v = tmp_idx_parent[v];
				vis[v] = true;
				visited_que.push(v);
				if (usd[v]) continue;
				for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
					NodeID w = tmp_idx_v.first[i];
					EdgeWeight td = tmp_idx_v.second[i] + dst_r[w];
					if (td <= v_d) {
						pruning_power[w]++;
						goto pruned;
					}
				}
				// Traverse: record (r, v_d) plus the parent of v for this root.
				tmp_idx_v.first.back() = r;
				tmp_idx_v.second.back() = v_d;
				tmp_idx_v.first.push_back(numOfVertices);
				tmp_idx_v.second.push_back(INF_WEIGHT);
				tmp_idx_parent_v.first.back() = r;
				tmp_idx_parent_v.second.back() = parents[v];
				tmp_idx_parent_v.first.push_back(numOfVertices);
				tmp_idx_parent_v.second.push_back(numOfVertices);
				iteration_generated[r]++;
				// for (size_t i = 0; i < adj[v].size(); ++i) {
				// NodeID w = adj[v][i];
				// EdgeWeight w_d = adj_weight[v][i] + v_d;
make_pair(vector<NodeID>(1, numOfVertices),
				vector<EdgeWeight>(1, INF_WEIGHT)));
		// Backward (reverse-graph) labels under construction.
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<EdgeWeight>(1, INF_WEIGHT)));
		vector<bool> vis(numOfVertices);
		vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
		vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT);
		queue<NodeID> visited_que;
		vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
		benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
		// Forward search from r.
		for (size_t r = 0; r < numOfVertices; ++r) {
			if (usd[r]) continue;
			const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
				dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
			}
			pqueue.update(r, 0);
			//vis[r] = true;
			// Pruned Dijkstra over OUT-edges; extends the BACKWARD labels of
			// each settled vertex (covers r -> v paths).
			while (!pqueue.empty()) {
				NodeID v;
				EdgeWeight v_d;
				pqueue.extract_min(v, v_d);
				pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v];
				vis[v] = true;
				visited_que.push(v);
				if (usd[v]) continue;
				for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) {
					NodeID w = r_tmp_idx_v.first[i];
					EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w];
					if (td <= v_d) {
						pruning_power[w]++;
						goto pruned_forward;
					}
				}
				// Traverse
				r_tmp_idx_v.first.back() = r;
				r_tmp_idx_v.second.back() = v_d;
				r_tmp_idx_v.first.push_back(numOfVertices);
				r_tmp_idx_v.second.push_back(INF_WEIGHT);
				iteration_generated[r]++;
				/*	for (size_t i = 0; i < adj[v].size(); ++i) {
				NodeID w = adj[v][i];
				EdgeWeight w_d = adj_weight[v][i] + v_d;*/
				//Array Representation
				for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) {
					NodeID w = edges[eid].first;
					EdgeWeight w_d = edges[eid].second + v_d;
					if (!vis[w]) {
						if (distances[w] > w_d) {
							pqueue.update(w, w_d);
							distances[w] = w_d;
						}
					}
				}
			pruned_forward:
				{}
			}
			// Reset scratch state before the backward pass.
			while (!visited_que.empty()) {
				NodeID vis_v = visited_que.front();
				visited_que.pop();
				vis[vis_v] = false;
				distances[vis_v] = INF_WEIGHT;
				//pqueue.clear(vis_v);
			}
			//pqueue.clear_n();
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
				dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
			// Backward search from r.
			const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r];
			for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) {
				r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i];
			}
			pqueue.update(r, 0);
			// Pruned Dijkstra over IN-edges; extends the FORWARD labels of each
			// settled vertex (covers v -> r paths).
			while (!pqueue.empty()) {
				NodeID v;
				EdgeWeight v_d;
				pqueue.extract_min(v, v_d);
				pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
				vis[v] = true;
				visited_que.push(v);
				if (usd[v]) continue;
				for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
					NodeID w = tmp_idx_v.first[i];
					EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w];
					if (td <= v_d) {
						pruning_power[w]++;
						goto pruned_backward;
					}
				}
				// Traverse
				tmp_idx_v.first.back() = r;
				tmp_idx_v.second.back() = v_d;
				tmp_idx_v.first.push_back(numOfVertices);
				tmp_idx_v.second.push_back(INF_WEIGHT);
				iteration_generated[r]++;
				/*for (size_t i = 0; i < r_adj[v].size(); ++i) {
				NodeID w = r_adj[v][i];
				EdgeWeight w_d = r_adj_weight[v][i] + v_d;*/
				//Array Representation
				for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) {
					NodeID w = r_edges[eid].first;
					EdgeWeight w_d = r_edges[eid].second + v_d;
					if (!vis[w]) {
						if (distances[w] > w_d) {
							pqueue.update(w, w_d);
							distances[w] = w_d;
						}
					}
				}
			pruned_backward:
				{}
			}
			while (!visited_que.empty()) {
				NodeID vis_v = visited_que.front();
				visited_que.pop();
				vis[vis_v] = false;
				distances[vis_v] = INF_WEIGHT;
				//pqueue.clear(vis_v);
			}
			// pqueue.clear_n();
			for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i)
				r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT;
			usd[r] = true;
		}
		// Materialise forward (index_) and backward (bindex_) labels.
		for (size_t v = 0; v < numOfVertices; ++v) {
			NodeID k = tmp_idx[v].first.size();
			index_[inv[v]].spt_v.resize(k);
			index_[inv[v]].spt_d.resize(k);
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
			tmp_idx[v].first.clear();
			tmp_idx[v].second.clear();
			tmp_idx[v].first.shrink_to_fit();
			tmp_idx[v].second.shrink_to_fit();
			k = r_tmp_idx[v].first.size();
			bindex_[inv[v]].spt_v.resize(k);
			bindex_[inv[v]].spt_d.resize(k);
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i];
			r_tmp_idx[v].first.clear();
			r_tmp_idx[v].second.clear();
			r_tmp_idx[v].first.shrink_to_fit();
			r_tmp_idx[v].second.shrink_to_fit();
		}
	}

	// Directed weighted labeling WITH parent pointers (path labels) into
	// `dplabels`: forward/backward pruned Dijkstra per root, recording the
	// predecessor of each settled vertex for path reconstruction.
	PL_W(WGraph &wgraph, Ordering &orders, bool D_FLAGS, bool PATH_QUERY, bool dwpath) {
		iteration_generated.resize(numOfVertices);
		pruning_power.resize(numOfVertices);
		vector<index_t_path>& index_ = dplabels.index_;
		vector<NodeID> &inv = orders.inv;
		vector<NodeID> &rank = orders.rank;
		/*vector<vector<NodeID> > &adj = wgraph.adj;
		vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;*/
		vector<index_t_path>& bindex_ = dplabels.bindex_;/*
		vector<vector<NodeID> > &r_adj = wgraph.r_adj;
		vector<vector<EdgeWeight> > &r_adj_weight = wgraph.r_adj_weight;*/
		//Array Representation
		vector<EdgeID>& vertices = wgraph.vertices;
		vector<EdgeID>& r_vertices = wgraph.r_vertices;
		vector<NodeEdgeWeightPair>& edges = wgraph.edges;
		vector<NodeEdgeWeightPair>& r_edges = wgraph.r_edges;
		vector<bool> usd(numOfVertices, false);
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<EdgeWeight>(1, INF_WEIGHT)));
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<EdgeWeight>(1, INF_WEIGHT)));
		// Parent tracking for the forward pass and its per-label record.
		vector<NodeID> parents(numOfVertices, numOfVertices);
		vector<pair<vector<NodeID>, vector<NodeID> > > tmp_idx_parent(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<NodeID>(1, numOfVertices)));
		// Parent tracking for the backward pass.
		vector<NodeID> r_parents(numOfVertices, numOfVertices);
		vector<pair<vector<NodeID>, vector<NodeID> > > r_tmp_idx_parent(numOfVertices,
			make_pair(vector<NodeID>(1, numOfVertices),
				vector<NodeID>(1,
numOfVertices)));
		vector<bool> vis(numOfVertices);
		vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
		vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT);
		queue<NodeID> visited_que;
		vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
		benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
		// Forward search from r.
		for (size_t r = 0; r < numOfVertices; ++r) {
			if (usd[r]) continue;
			const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
				dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
			}
			pqueue.update(r, 0);
			distances[r] = 0;
			parents[r] = inv[r]; // root is its own parent
			//vis[r] = true;
			// Pruned Dijkstra over OUT-edges; extends BACKWARD labels and
			// records each settled vertex's predecessor.
			while (!pqueue.empty()) {
				NodeID v;
				EdgeWeight v_d;
				pqueue.extract_min(v, v_d);
				pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v];
				pair<vector<NodeID>, vector<NodeID> > &r_tmp_idx_parent_v = r_tmp_idx_parent[v];
				vis[v] = true;
				visited_que.push(v);
				if (usd[v]) continue;
				for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) {
					NodeID w = r_tmp_idx_v.first[i];
					EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w];
					if (td <= v_d) {
						pruning_power[w]++;
						goto pruned_forward;
					}
				}
				// Traverse
				r_tmp_idx_v.first.back() = r;
				r_tmp_idx_v.second.back() = v_d;
				r_tmp_idx_v.first.push_back(numOfVertices);
				r_tmp_idx_v.second.push_back(INF_WEIGHT);
				iteration_generated[r]++;
				r_tmp_idx_parent_v.first.back() = r;
				r_tmp_idx_parent_v.second.back() = parents[v];
				r_tmp_idx_parent_v.first.push_back(numOfVertices);
				r_tmp_idx_parent_v.second.push_back(numOfVertices);
				/*	for (size_t i = 0; i < adj[v].size(); ++i) {
				NodeID w = adj[v][i];
				EdgeWeight w_d = adj_weight[v][i] + v_d;*/
				//Array Representation
				for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) {
					NodeID w = edges[eid].first;
					EdgeWeight w_d = edges[eid].second + v_d;
					if (!vis[w]) {
						if (distances[w] > w_d) {
							parents[w] = inv[v];
							pqueue.update(w, w_d);
							distances[w] = w_d;
						}
					}
				}
			pruned_forward:
				{}
			}
			// Reset scratch state before the backward pass.
			while (!visited_que.empty()) {
				NodeID vis_v = visited_que.front();
				visited_que.pop();
				vis[vis_v] = false;
				distances[vis_v] = INF_WEIGHT;
				parents[vis_v] = numOfVertices;
				//pqueue.clear(vis_v);
			}
			//pqueue.clear_n();
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
				dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
			// Backward search from r.
			const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r];
			for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) {
				r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i];
			}
			pqueue.update(r, 0);
			r_parents[r] = inv[r];
			// Pruned Dijkstra over IN-edges; extends FORWARD labels with parents.
			while (!pqueue.empty()) {
				NodeID v;
				EdgeWeight v_d;
				pqueue.extract_min(v, v_d);
				pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
				pair<vector<NodeID>, vector<NodeID> > &tmp_idx_parent_v = tmp_idx_parent[v];
				vis[v] = true;
				visited_que.push(v);
				if (usd[v]) continue;
				for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
					NodeID w = tmp_idx_v.first[i];
					EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w];
					if (td <= v_d) {
						pruning_power[w]++;
						goto pruned_backward;
					}
				}
				// Traverse
				tmp_idx_v.first.back() = r;
				tmp_idx_v.second.back() = v_d;
				tmp_idx_v.first.push_back(numOfVertices);
				tmp_idx_v.second.push_back(INF_WEIGHT);
				tmp_idx_parent_v.first.back() = r;
				tmp_idx_parent_v.second.back() = r_parents[v];
				tmp_idx_parent_v.first.push_back(numOfVertices);
				tmp_idx_parent_v.second.push_back(numOfVertices);
				iteration_generated[r]++;
				/*for (size_t i = 0; i < r_adj[v].size(); ++i) {
				NodeID w = r_adj[v][i];
				EdgeWeight w_d = r_adj_weight[v][i] + v_d;*/
				//Array Representation
				for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) {
					NodeID w = r_edges[eid].first;
					EdgeWeight w_d = r_edges[eid].second + v_d;
					if (!vis[w]) {
						if (distances[w] > w_d) {
							pqueue.update(w, w_d);
							distances[w] = w_d;
							r_parents[w] = inv[v];
						}
					}
				}
			pruned_backward:
				{}
			}
			while (!visited_que.empty()) {
				NodeID vis_v = visited_que.front();
				visited_que.pop();
				vis[vis_v] = false;
				distances[vis_v] = INF_WEIGHT;
				r_parents[vis_v] = numOfVertices;
				//pqueue.clear(vis_v);
			}
			// pqueue.clear_n();
			for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i)
				r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT;
			usd[r] = true;
		}
		// Materialise forward/backward labels plus parent arrays (spt_p).
		for (size_t v = 0; v < numOfVertices; ++v) {
			NodeID k = tmp_idx[v].first.size();
			index_[inv[v]].spt_v.resize(k);
			index_[inv[v]].spt_d.resize(k);
			index_[inv[v]].spt_p.resize(k);
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_p[i] = tmp_idx_parent[v].second[i];
			tmp_idx[v].first.clear();
			tmp_idx[v].second.clear();
			tmp_idx[v].first.shrink_to_fit();
			tmp_idx[v].second.shrink_to_fit();
			tmp_idx_parent[v].first.clear();
			tmp_idx_parent[v].second.clear();
			tmp_idx_parent[v].first.shrink_to_fit();
			tmp_idx_parent[v].second.shrink_to_fit();
			k = r_tmp_idx[v].first.size();
			bindex_[inv[v]].spt_v.resize(k);
			bindex_[inv[v]].spt_d.resize(k);
			bindex_[inv[v]].spt_p.resize(k);
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i];
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_p[i] = r_tmp_idx_parent[v].second[i];
			r_tmp_idx[v].first.clear();
			r_tmp_idx[v].second.clear();
			r_tmp_idx[v].first.shrink_to_fit();
			r_tmp_idx[v].second.shrink_to_fit();
			r_tmp_idx_parent[v].first.clear();
			r_tmp_idx_parent[v].second.clear();
			r_tmp_idx_parent[v].first.shrink_to_fit();
			r_tmp_idx_parent[v].second.shrink_to_fit();
		}
	}
};

// Compressed pruned labeling: converts flat 2-hop labels into a token tree
// (shared subtrees deduplicated through a hash map) to reduce index size.
class CPL : public construction {
public:
	vector<double> iteration_generated;
	vector<double> pruning_power;
	bool SECOND_LEVEL = false;
	// Total child-slot counts of the emitted token lists (forward / reverse).
	long children_size;
	long r_children_size;

	// Hash functor for (anchor vertex, child-token list) pairs, used to
	// deduplicate identical tokens.  Mixing constant 0x9e3779b9 follows the
	// boost::hash_combine recipe.
	class nodeid_vector_hasher {
	public:
		std::size_t operator()(std::pair<NodeID, std::vector<NodeID> > const& pairvec) const {
			std::size_t seed = pairvec.second.size();
			seed ^= pairvec.first + 0x9e3779b9 + (seed << 6) + (seed >> 2);
			for(auto& i : pairvec.second) {
				seed ^= i + 0x9e3779b9 + (seed << 6) + (seed >> 2);
			}
			return seed;
		}
	};
	//using boost::hash_combine
// boost-style hash_combine helper (same mixing as nodeid_vector_hasher).
template <class T>
inline void hash_combine(std::size_t& seed, T const& v) {
	seed ^= std::hash<T>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

// Convert one vertex's label list (tmp_idx_v) and its token-parent records
// (tmp_idx_token_parents_v) into tree tokens. For each hub h whose pending
// child list tmp_tokens[h] is non-empty, a token is created (or reused via
// token_map, which deduplicates on the (h, child-list) key); fresh tokens
// are appended to tokens_list and their slot count added to children_size /
// r_children_size depending on REVERSE. Each produced token id is then
// pushed onto its parent hub's pending child list. Returns the id of this
// vertex's anchor: the token (or plain hub id) of the last real label entry.
// NOTE(review): reads tmp_idx_v.first[lsize-2], so the label list is assumed
// to hold at least one real entry before the sentinel (lsize >= 2); with an
// unsigned NodeID, lsize == 1 would underflow — TODO confirm invariant.
NodeID convertlist(NodeID& token_id, vector<token_t>& tokens_list,
		vector<vector<NodeID> >& tmp_tokens,
		vector<vector<EdgeWeight> >& tmp_tokens_distances,
		unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher>& token_map,
		pair<vector<NodeID>, vector<EdgeWeight> > & tmp_idx_v,
		pair<vector<NodeID>, vector<NodeID> >& tmp_idx_token_parents_v,
		bool REVERSE){
	NodeID lsize = tmp_idx_v.first.size();
	NodeID anchorid = tmp_idx_v.first[lsize-2]; // default anchor: last real hub id
	for(NodeID i = 0; i < lsize - 1; ++i){ // lsize-1 skips the trailing sentinel entry
		//Add to parent_tree
		NodeID h = tmp_idx_v.first[i];
		NodeID hparent = tmp_idx_token_parents_v.first[i];
		EdgeWeight hparent_dis = tmp_idx_token_parents_v.second[i];
		NodeID tid = h; // trivial token: hub with no children keeps its own id
		// non-trival tokens
		if(tmp_tokens[h].size() != 0){
			vector<NodeID>& tmp_token_h = tmp_tokens[h];
			//string tstring = token_string(h, tmp_token_h);
			vector<EdgeWeight>& tmp_tokens_distances_h = tmp_tokens_distances[h];
			pair<NodeID, vector<NodeID> > token_key = make_pair(h, tmp_token_h);
			// New token
			if(token_map.find(token_key) == token_map.end()){
				token_map[token_key] = token_id;
				token_t new_token;
				NodeID csize = tmp_token_h.size();
				// Layout: sptc_v[0] = anchor hub, sptc_d[0] = child count;
				// children and their distances occupy slots [1, csize].
				new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID));
				new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
				new_token.sptc_v[0] = h;
				new_token.sptc_d[0] = csize;
				if(REVERSE)
					r_children_size += (csize + 1);
				else
					children_size += (csize + 1);
				for(NodeID j = 0; j < csize; ++j){
					new_token.sptc_v[j+1] = tmp_token_h[j];
					new_token.sptc_d[j+1] = tmp_tokens_distances_h[j];
				}
				tokens_list.push_back(new_token);
				tid = token_id;
				token_id++;
			}else // Already exist
				tid = token_map[token_key];
		}
		//trival tokens
		if(i == lsize - 2)
			anchorid = tid; // anchor becomes the token covering the last real label
		if(hparent!=numOfVertices){ // numOfVertices acts as the "no parent" sentinel
			tmp_tokens[hparent].push_back(tid);
			tmp_tokens_distances[hparent].push_back(hparent_dis);
		}
	}
	return anchorid;
}

void
converttokens(vector<pair<vector<NodeID>, vector<EdgeWeight> > > & tmp_idx, vector<pair<vector<NodeID>, vector<NodeID> > >& tmp_idx_token_parents, bool REVERSE){ vector<token_t> tokens_list; vector<token_t> r_tokens_list; vector<vector<NodeID> > tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > tmp_tokens_distances(numOfVertices); vector<vector<NodeID> > r_tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > r_tmp_tokens_distances(numOfVertices); unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> token_map; unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> r_token_map; if(REVERSE == false) children_size = 0; else r_children_size = 0; NodeID token_id = numOfVertices; NodeID r_token_id = numOfVertices; if(REVERSE == false) clabels.anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); else clabels.r_anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ if(REVERSE == false) clabels.anchor_p[v] = convertlist(token_id, tokens_list, tmp_tokens, tmp_tokens_distances, token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); else clabels.r_anchor_p[v] = convertlist(r_token_id, r_tokens_list, r_tmp_tokens, r_tmp_tokens_distances, r_token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); //if(REVERSE == true) // validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.r_anchor_p[v], que); if(REVERSE == false){ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( hparent != numOfVertices ){ if(tmp_tokens[hparent].empty() == false){ tmp_tokens[hparent].clear(); tmp_tokens_distances[hparent].clear(); } } } }else{ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( 
hparent != numOfVertices ){ if(r_tmp_tokens[hparent].empty() == false){ r_tmp_tokens[hparent].clear(); r_tmp_tokens_distances[hparent].clear(); } } } } } //vector<vector<bool> > one_level(tokens_list.size()); if(REVERSE == false){ clabels.numOfTokens = tokens_list.size(); clabels.tokenindex_p = (token_t*)memalign(64, clabels.numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.numOfTokens; ++i){ clabels.tokenindex_p[i] = tokens_list[i]; } }else{ clabels.r_numOfTokens = r_tokens_list.size(); clabels.r_tokenindex_p = (token_t*)memalign(64, clabels.r_numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } } if(REVERSE == false){ if(SECOND_LEVEL){ vector<token_t> supertokens(numOfVertices); convertsupertokens(tokens_list, supertokens); clabels.supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < supertokens.size(); ++i){ clabels.supertokenindex_p[i] = supertokens[i]; } for(NodeID i = 0; i < clabels.numOfTokens; ++i){ clabels.tokenindex_p[i] = tokens_list[i]; } //convertsupertokens(clabels.tokenindex_p, clabels.supertokenindex_p, clabels.numOfTokens); //vector<NodeID> que(numOfVertices, numOfVertices); //for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { //cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list,supertokens, clabels.anchor_p[v], que); //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.supertokenindex_p, clabels.anchor_p[v], que); //} //} } }else{ if(SECOND_LEVEL){ //convertsupertokens(clabels.r_tokenindex_p, clabels.r_supertokenindex_p, clabels.r_numOfTokens); vector<token_t> r_supertokens(numOfVertices); convertsupertokens(r_tokens_list, r_supertokens); clabels.r_supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < r_supertokens.size(); ++i){ clabels.r_supertokenindex_p[i] = r_supertokens[i]; } for(NodeID i = 0; i < 
clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } /* //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, r_supertokens, clabels.r_anchor_p[v], que); validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, clabels.r_supertokenindex_p, clabels.r_anchor_p[v], que); //} }*/ } } clabels.total_children = children_size; } void convertsupertokens(vector<token_t>& tokens_list, vector<token_t>& supertokens){ vector<unordered_map<NodeID, NodeID> > sorted_sp(numOfVertices); vector<unordered_map<NodeID, EdgeWeight> > dis_sp(numOfVertices); NodeID total_supertoken_children = 0; for(NodeID t = 0 ; t < tokens_list.size(); ++t){ token_t& token = tokens_list[t]; NodeID r = token.sptc_v[0]; EdgeWeight csize = token.sptc_d[0]; unordered_map<NodeID, NodeID>& rc_map = sorted_sp[r]; unordered_map<NodeID, EdgeWeight>& rd_map = dis_sp[r]; for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; rc_map[cid]++; rd_map[cid] = token.sptc_d[i + 1]; } } //supertokens.resize(numOfVertices); // Creating super tokens, sorting the children based on frequency for(NodeID v = 0; v < numOfVertices; ++v){ vector<pair<NodeID, NodeID> > sorted_tmp; unordered_map<NodeID, NodeID>& vc_map = sorted_sp[v]; unordered_map<NodeID, EdgeWeight>& vd_map = dis_sp[v]; for(unordered_map<NodeID, NodeID>::iterator it = vc_map.begin(); it != vc_map.end(); ++it){ sorted_tmp.push_back(make_pair((*it).second, (*it).first)); } sort(sorted_tmp.rbegin(), sorted_tmp.rend()); token_t new_token; EdgeWeight csize = sorted_tmp.size(); new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID)); new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight)); new_token.sptc_v[0] = csize; new_token.sptc_d[0] = ceil((double)ceil((double)csize / (double)8) / (double)8); for(NodeID i = 0; i < csize; ++i){ NodeID cid = 
sorted_tmp[i].second; new_token.sptc_v[i + 1] = cid; new_token.sptc_d[i + 1] = vd_map[cid]; } supertokens[v] = new_token; total_supertoken_children += csize; } // Converting each tokens to supertokens vector<bool> isChild(numOfVertices + tokens_list.size(), false); for(NodeID t = 0 ; t < tokens_list.size(); ++t){ token_t& token = tokens_list[t]; NodeID r = token.sptc_v[0]; EdgeWeight csize = token.sptc_d[0]; if(csize == 0) continue; /* vector<pair<NodeID, NodeID> > sorted_tmp; unordered_map<NodeID, NodeID>& rc_map = sorted_sp[r]; */ for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; isChild[cid] = true; } //sort(sorted_tmp.rbegin(), sorted_tmp.rend()); const token_t& supertoken_r = supertokens[r]; //vector<bool>& one_level_t = one_level[t]; //NodeID second_level_length = ceil((double)supertoken_r.sptc_d[0] / (double)8); NodeID first_level_length = ceil((double)supertoken_r.sptc_v[0] / (double)8); //cout << first_level_length << "," << second_level_length << endl; vector<bool> first_level_bv(first_level_length); vector<bool> second_level_bv; // NodeID t1 = 0; NodeID t2 = 1; // NodeID tt = sorted_tmp[t1].second; NodeID st = supertoken_r.sptc_v[t2]; // NodeID ctsize = sorted_tmp.size(); NodeID stsize = supertoken_r.sptc_v[0]; //one_level_t.resize(stsize, false); vector<bool> tmp_set(8, false); for(NodeID i = 0; i < first_level_length; ++i){ NodeID in_batch = false; //if(t1 != ctsize && t2 != stsize) fill(tmp_set.begin(), tmp_set.end(), false); for(NodeID j = 0; j < 8; ++j){ // if(t1 == ctsize) break; if(t2 == (stsize + 1)) break; // tt = sorted_tmp[t1].second; st = supertoken_r.sptc_v[t2]; if(isChild[st]){ tmp_set[j] = true; in_batch = true; } t2++; } if(in_batch == false) first_level_bv[i] = false; else{ first_level_bv[i] = true; for(NodeID j = 0; j < 8; ++j) second_level_bv.push_back(tmp_set[j]); } } for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; isChild[cid] = false; } NodeID first_level_int_length = 
ceil((double)first_level_length/(double)8); // bytes NodeID second_level_int_length = ceil((double)second_level_bv.size() / (double)8); // bytes // if(t < 10) cout << "sl:" << r << "," << second_level_int_length << " vs " << second_level_bv.size() << " vs " << token.sptc_d[0] << ";" << stsize << " vs " << first_level_length << endl; //supertoken_r.sptc_v[0] = stsize; //how many children for this supertoken //supertoken_r.sptc_d[0] = first_level_int_length; //how many uchar to store for this token referring to this supertoken = stsize / 8 / 8 token.sptc_d[0] = second_level_int_length; //how many uchar to store for this token in second level // convert first_level_bv -> uint8_t* sptc_fbv // convert second_level_bv -> uint8_t* sptc_sbv // first_level_bv % 8 == 0; // second_level_bv % 8 == 0; token.sptc_fbv = (unsigned char*)memalign(64, first_level_int_length * sizeof(unsigned char)); token.sptc_sbv = (unsigned char*)memalign(64, second_level_int_length * sizeof(unsigned char)); // cout << "first:" << endl; for(NodeID i = 0; i < first_level_int_length; ++i){ token.sptc_fbv[i] = 0; for(NodeID j = 0; j < 8; ++j){ token.sptc_fbv[i] = token.sptc_fbv[i] << 1; if(first_level_bv[i * 8 + j]) ++token.sptc_fbv[i]; } /* bitset<8> x(token.sptc_fbv[i]); cout << x << endl; for(NodeID j = 0; j < 8; ++j){ if(first_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } cout << endl; */ } //cout << endl; //cout << "second:" << endl; for(NodeID i = 0; i < second_level_int_length; ++i){ token.sptc_sbv[i] = 0; for(NodeID j = 0; j < 8; ++j){ token.sptc_sbv[i] = token.sptc_sbv[i] << 1; if(second_level_bv[i * 8 + j]) ++token.sptc_sbv[i]; } // bitset<8> x(token.sptc_sbv[i]); // cout << x ; /* for(NodeID j = 0; j < 8; ++j){ if(second_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } */ // cout << ","; } //cout << endl; } cout << " Number of Supertokens: " << supertokens.size() << endl; cout << " Average Children of Supertokens: " << (double)total_supertoken_children / (double) 
supertokens.size() << endl; } CPL(Graph &graph, Ordering &orders, bool slevel) { SECOND_LEVEL = slevel; iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v]; index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if(tmp_idx_v.second[i] == d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){ tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); tmp_idx_token_parents_v.second[i] = dst_r[w]; } if (td <= d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; 
tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_token_parents_v.first.back() = numOfVertices; tmp_idx_token_parents_v.second.back() = INF_WEIGHT; tmp_idx_token_parents_v.first.push_back(numOfVertices); tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid){ NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } converttokens(tmp_idx, tmp_idx_token_parents, false); vector<NodeID> change_anchor(numOfVertices); for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.anchor_p[v] = change_anchor[v]; } /* for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } */ } CPL(Graph &graph, Ordering &orders, bool slevel, bool D_FLAGS) { SECOND_LEVEL = slevel; iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = dlabels.index_; vector<index_t>& bindex_ = dlabels.bindex_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /* vector<vector<NodeID> > &adj = graph.adj; vector<vector<NodeID> > &r_adj = graph.r_adj;*/ // Array Representation vector<EdgeID>& vertices = graph.vertices; vector<EdgeID>& r_vertices = 
graph.r_vertices; vector<NodeID>& edges = graph.edges; vector<NodeID>& r_edges = graph.r_edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); // Backward labels. vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // Forward labels of root. vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward labels of root. for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; // Forward search. // Initialize forward labels of r. 
const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_token_parents_v = r_tmp_idx_token_parents[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v. for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if(r_tmp_idx_v.second[i] == d + r_dst_r[w] && r_tmp_idx_token_parents_v.first[i] == numOfVertices){ r_tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); r_tmp_idx_token_parents_v.second[i] = r_dst_r[w]; } if (td <= d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); r_tmp_idx_token_parents_v.first.back() = numOfVertices; r_tmp_idx_token_parents_v.second.back() = INF_WEIGHT; r_tmp_idx_token_parents_v.first.push_back(numOfVertices); r_tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ // Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid){ NodeID w = edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } 
pruned_forward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; // Backward search. // Initialize backward labels of r. que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path). for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if(tmp_idx_v.second[i] == d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){ tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); tmp_idx_token_parents_v.second[i] = dst_r[w]; } if (td <= d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_token_parents_v.first.back() = numOfVertices; tmp_idx_token_parents_v.second.back() = INF_WEIGHT; tmp_idx_token_parents_v.first.push_back(numOfVertices); tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i];*/ // Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_backward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; for (size_t i = 0; i 
< r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } converttokens(tmp_idx, tmp_idx_token_parents, false); //converttokens(tmp_idx, tmp_idx_token_parents, r_tmp_idx, r_tmp_idx_token_parents); cout << clabels.numOfTokens << " Tokens in total" << endl; cout << (double)children_size / (double) clabels.numOfTokens << " average children number" << endl; converttokens(r_tmp_idx, r_tmp_idx_token_parents, true); cout << clabels.r_numOfTokens << " Tokens in total" << endl; cout << (double)r_children_size / (double) clabels.r_numOfTokens << " average children number" << endl; vector<NodeID> change_anchor(numOfVertices); for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.anchor_p[v] = change_anchor[v]; } for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.r_anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.r_anchor_p[v] = change_anchor[v]; } } }; class CPL_W : public construction { public: vector<double> iteration_generated; vector<double> pruning_power; bool SECOND_LEVEL = false; long children_size; long r_children_size; class nodeid_vector_hasher { public: std::size_t operator()(std::pair<NodeID, std::vector<NodeID> > const& pairvec) const { std::size_t seed = pairvec.second.size(); seed ^= pairvec.first + 0x9e3779b9 + (seed << 6) + (seed >> 2); for(auto& i : pairvec.second) { seed ^= i + 0x9e3779b9 + (seed << 6) + (seed >> 2); } return seed; } }; //using boost::hash_combine template <class T> inline void hash_combine(std::size_t& seed, T const& v) { seed ^= std::hash<T>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } NodeID convertlist(NodeID& token_id, vector<token_t>& tokens_list, vector<vector<NodeID> >& tmp_tokens, vector<vector<EdgeWeight> >& tmp_tokens_distances, unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher>& token_map, pair<vector<NodeID>, vector<EdgeWeight> > & 
tmp_idx_v, pair<vector<NodeID>, vector<NodeID> >& tmp_idx_token_parents_v, bool REVERSE){ NodeID lsize = tmp_idx_v.first.size(); NodeID anchorid = tmp_idx_v.first[lsize-2]; for(NodeID i = 0; i < lsize - 1; ++i){ //Add to parent_tree NodeID h = tmp_idx_v.first[i]; NodeID hparent = tmp_idx_token_parents_v.first[i]; EdgeWeight hparent_dis = tmp_idx_token_parents_v.second[i]; NodeID tid = h; // non-trival tokens if(tmp_tokens[h].size() != 0){ vector<NodeID>& tmp_token_h = tmp_tokens[h]; //string tstring = token_string(h, tmp_token_h); vector<EdgeWeight>& tmp_tokens_distances_h = tmp_tokens_distances[h]; pair<NodeID, vector<NodeID> > token_key = make_pair(h, tmp_token_h); // New token if(token_map.find(token_key) == token_map.end()){ token_map[token_key] = token_id; token_t new_token; NodeID csize = tmp_token_h.size(); new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID)); new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight)); new_token.sptc_v[0] = h; new_token.sptc_d[0] = csize; if(REVERSE) r_children_size += (csize + 1); else children_size += (csize + 1); for(NodeID j = 0; j < csize; ++j){ new_token.sptc_v[j+1] = tmp_token_h[j]; new_token.sptc_d[j+1] = tmp_tokens_distances_h[j]; } tokens_list.push_back(new_token); tid = token_id; token_id++; }else // Already exist tid = token_map[token_key]; } //trival tokens if(i == lsize - 2) anchorid = tid; if(hparent!=numOfVertices){ tmp_tokens[hparent].push_back(tid); tmp_tokens_distances[hparent].push_back(hparent_dis); } } return anchorid; } void converttokens(vector<pair<vector<NodeID>, vector<EdgeWeight> > > & tmp_idx, vector<pair<vector<NodeID>, vector<NodeID> > >& tmp_idx_token_parents, bool REVERSE){ vector<token_t> tokens_list; vector<token_t> r_tokens_list; vector<vector<NodeID> > tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > tmp_tokens_distances(numOfVertices); vector<vector<NodeID> > r_tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > 
r_tmp_tokens_distances(numOfVertices); unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> token_map; unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> r_token_map; if(REVERSE == false) children_size = 0; else r_children_size = 0; NodeID token_id = numOfVertices; NodeID r_token_id = numOfVertices; if(REVERSE == false) clabels.anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); else clabels.r_anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ if(REVERSE == false) clabels.anchor_p[v] = convertlist(token_id, tokens_list, tmp_tokens, tmp_tokens_distances, token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); else clabels.r_anchor_p[v] = convertlist(r_token_id, r_tokens_list, r_tmp_tokens, r_tmp_tokens_distances, r_token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); //if(REVERSE == true) // validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.r_anchor_p[v], que); if(REVERSE == false){ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( hparent != numOfVertices ){ if(tmp_tokens[hparent].empty() == false){ tmp_tokens[hparent].clear(); tmp_tokens_distances[hparent].clear(); } } } }else{ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( hparent != numOfVertices ){ if(r_tmp_tokens[hparent].empty() == false){ r_tmp_tokens[hparent].clear(); r_tmp_tokens_distances[hparent].clear(); } } } } } //vector<vector<bool> > one_level(tokens_list.size()); if(REVERSE == false){ clabels.numOfTokens = tokens_list.size(); clabels.tokenindex_p = (token_t*)memalign(64, clabels.numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.numOfTokens; ++i){ 
clabels.tokenindex_p[i] = tokens_list[i]; } }else{ clabels.r_numOfTokens = r_tokens_list.size(); clabels.r_tokenindex_p = (token_t*)memalign(64, clabels.r_numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } } if(REVERSE == false){ if(SECOND_LEVEL){ vector<token_t> supertokens(numOfVertices); convertsupertokens(tokens_list, supertokens); clabels.supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < supertokens.size(); ++i){ clabels.supertokenindex_p[i] = supertokens[i]; } for(NodeID i = 0; i < clabels.numOfTokens; ++i){ clabels.tokenindex_p[i] = tokens_list[i]; } //convertsupertokens(clabels.tokenindex_p, clabels.supertokenindex_p, clabels.numOfTokens); //vector<NodeID> que(numOfVertices, numOfVertices); //for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { //cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list,supertokens, clabels.anchor_p[v], que); //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.supertokenindex_p, clabels.anchor_p[v], que); //} //} } }else{ if(SECOND_LEVEL){ //convertsupertokens(clabels.r_tokenindex_p, clabels.r_supertokenindex_p, clabels.r_numOfTokens); vector<token_t> r_supertokens(numOfVertices); convertsupertokens(r_tokens_list, r_supertokens); clabels.r_supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < r_supertokens.size(); ++i){ clabels.r_supertokenindex_p[i] = r_supertokens[i]; } for(NodeID i = 0; i < clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } /* //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, r_supertokens, clabels.r_anchor_p[v], que); validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, clabels.r_supertokenindex_p, 
clabels.r_anchor_p[v], que); //} }*/
} }
clabels.total_children = children_size;
}

// Build one "supertoken" per anchor vertex (the union of all children seen in any
// token anchored at that vertex, sorted by frequency), then re-encode every token
// as a two-level bitmap over its anchor's supertoken child list.
void convertsupertokens(vector<token_t>& tokens_list, vector<token_t>& supertokens){
  // sorted_sp[r][c] = how many tokens anchored at r contain child c;
  // dis_sp[r][c]    = distance recorded for child c in (the last such) token.
  vector<unordered_map<NodeID, NodeID> > sorted_sp(numOfVertices);
  vector<unordered_map<NodeID, EdgeWeight> > dis_sp(numOfVertices);
  NodeID total_supertoken_children = 0;
  // Pass 1: tally child frequencies/distances per anchor.
  // token.sptc_v[0] is the anchor id, token.sptc_d[0] the child count;
  // children follow at indices 1..csize.
  for(NodeID t = 0 ; t < tokens_list.size(); ++t){
    token_t& token = tokens_list[t];
    NodeID r = token.sptc_v[0];
    EdgeWeight csize = token.sptc_d[0];
    unordered_map<NodeID, NodeID>& rc_map = sorted_sp[r];
    unordered_map<NodeID, EdgeWeight>& rd_map = dis_sp[r];
    for(EdgeWeight i = 0; i < csize; ++i){
      NodeID cid = token.sptc_v[i + 1];
      rc_map[cid]++;
      rd_map[cid] = token.sptc_d[i + 1];
    }
  }
  //supertokens.resize(numOfVertices);
  // Creating super tokens, sorting the children based on frequency
  for(NodeID v = 0; v < numOfVertices; ++v){
    vector<pair<NodeID, NodeID> > sorted_tmp; // (frequency, child id)
    unordered_map<NodeID, NodeID>& vc_map = sorted_sp[v];
    unordered_map<NodeID, EdgeWeight>& vd_map = dis_sp[v];
    for(unordered_map<NodeID, NodeID>::iterator it = vc_map.begin(); it != vc_map.end(); ++it){
      sorted_tmp.push_back(make_pair((*it).second, (*it).first));
    }
    // Descending by frequency (reverse iterators).
    sort(sorted_tmp.rbegin(), sorted_tmp.rend());
    token_t new_token;
    EdgeWeight csize = sorted_tmp.size();
    new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID));
    new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
    // Slot 0 of a supertoken stores: child count, and the number of
    // first-level bytes (= ceil(ceil(csize/8)/8)) used by referring tokens.
    new_token.sptc_v[0] = csize;
    new_token.sptc_d[0] = ceil((double)ceil((double)csize / (double)8) / (double)8);
    for(NodeID i = 0; i < csize; ++i){
      NodeID cid = sorted_tmp[i].second;
      new_token.sptc_v[i + 1] = cid;
      new_token.sptc_d[i + 1] = vd_map[cid];
    }
    supertokens[v] = new_token;
    total_supertoken_children += csize;
  }
  // Converting each tokens to supertokens
  vector<bool> isChild(numOfVertices + tokens_list.size(), false);
  for(NodeID t = 0 ; t < tokens_list.size(); ++t){
    token_t& token = tokens_list[t];
    NodeID r = token.sptc_v[0];
    EdgeWeight csize = token.sptc_d[0];
    if(csize == 0) continue;
    /* vector<pair<NodeID, NodeID> > sorted_tmp; unordered_map<NodeID, NodeID>& rc_map = sorted_sp[r]; */
    // Mark this token's children for O(1) membership tests below.
    for(EdgeWeight i = 0; i < csize; ++i){
      NodeID cid = token.sptc_v[i + 1];
      isChild[cid] = true;
    }
    //sort(sorted_tmp.rbegin(), sorted_tmp.rend());
    const token_t& supertoken_r = supertokens[r];
    //vector<bool>& one_level_t = one_level[t];
    //NodeID second_level_length = ceil((double)supertoken_r.sptc_d[0] / (double)8);
    // One first-level bit per group of 8 supertoken children.
    NodeID first_level_length = ceil((double)supertoken_r.sptc_v[0] / (double)8);
    //cout << first_level_length << "," << second_level_length << endl;
    vector<bool> first_level_bv(first_level_length);
    vector<bool> second_level_bv;
    // NodeID t1 = 0;
    NodeID t2 = 1; // cursor into supertoken_r.sptc_v (children start at 1)
    // NodeID tt = sorted_tmp[t1].second;
    NodeID st = supertoken_r.sptc_v[t2];
    // NodeID ctsize = sorted_tmp.size();
    NodeID stsize = supertoken_r.sptc_v[0];
    //one_level_t.resize(stsize, false);
    vector<bool> tmp_set(8, false);
    // Walk the supertoken children in batches of 8: the first-level bit says
    // whether the batch contains any of this token's children; if so, 8
    // second-level bits record exactly which ones.
    for(NodeID i = 0; i < first_level_length; ++i){
      NodeID in_batch = false;
      //if(t1 != ctsize && t2 != stsize)
      fill(tmp_set.begin(), tmp_set.end(), false);
      for(NodeID j = 0; j < 8; ++j){
        // if(t1 == ctsize) break;
        if(t2 == (stsize + 1)) break;
        // tt = sorted_tmp[t1].second;
        st = supertoken_r.sptc_v[t2];
        if(isChild[st]){
          tmp_set[j] = true;
          in_batch = true;
        }
        t2++;
      }
      if(in_batch == false) first_level_bv[i] = false;
      else{
        first_level_bv[i] = true;
        for(NodeID j = 0; j < 8; ++j)
          second_level_bv.push_back(tmp_set[j]);
      }
    }
    // Unmark for the next token.
    for(EdgeWeight i = 0; i < csize; ++i){
      NodeID cid = token.sptc_v[i + 1];
      isChild[cid] = false;
    }
    NodeID first_level_int_length = ceil((double)first_level_length/(double)8); // bytes
    NodeID second_level_int_length = ceil((double)second_level_bv.size() / (double)8); // bytes
    // if(t < 10) cout << "sl:" << r << "," << second_level_int_length << " vs " << second_level_bv.size() << " vs " << token.sptc_d[0] << ";" << stsize << " vs " << first_level_length << endl;
    //supertoken_r.sptc_v[0] = stsize; //how many children for this supertoken
    //supertoken_r.sptc_d[0] = first_level_int_length; //how many uchar to store for this token referring to this supertoken = stsize / 8 / 8
    token.sptc_d[0] = second_level_int_length; //how many uchar to store for this token in second level
    // convert first_level_bv -> uint8_t* sptc_fbv
    // convert second_level_bv -> uint8_t* sptc_sbv
    // first_level_bv % 8 == 0;
    // second_level_bv % 8 == 0;
    token.sptc_fbv = (unsigned char*)memalign(64, first_level_int_length * sizeof(unsigned char));
    token.sptc_sbv = (unsigned char*)memalign(64, second_level_int_length * sizeof(unsigned char));
    // cout << "first:" << endl;
    // Pack first_level_bv into bytes, MSB-first.
    // NOTE(review): when first_level_length is not a multiple of 8 the last byte
    // reads first_level_bv past its size — confirm padding guarantees upstream.
    for(NodeID i = 0; i < first_level_int_length; ++i){
      token.sptc_fbv[i] = 0;
      for(NodeID j = 0; j < 8; ++j){
        token.sptc_fbv[i] = token.sptc_fbv[i] << 1;
        if(first_level_bv[i * 8 + j]) ++token.sptc_fbv[i];
      }
      /* bitset<8> x(token.sptc_fbv[i]); cout << x << endl; for(NodeID j = 0; j < 8; ++j){ if(first_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } cout << endl; */
    }
    //cout << endl;
    //cout << "second:" << endl;
    // Pack second_level_bv into bytes, MSB-first (its size is a multiple of 8 by
    // construction: bits are appended 8 at a time).
    for(NodeID i = 0; i < second_level_int_length; ++i){
      token.sptc_sbv[i] = 0;
      for(NodeID j = 0; j < 8; ++j){
        token.sptc_sbv[i] = token.sptc_sbv[i] << 1;
        if(second_level_bv[i * 8 + j]) ++token.sptc_sbv[i];
      }
      // bitset<8> x(token.sptc_sbv[i]);
      // cout << x ;
      /* for(NodeID j = 0; j < 8; ++j){ if(second_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } */
      // cout << ",";
    }
    //cout << endl;
  }
  cout << " Number of Supertokens: " << supertokens.size() << endl;
  cout << " Average Children of Supertokens: " << (double)total_supertoken_children / (double) supertokens.size() << endl;
}

// Constructor: pruned landmark labeling over a weighted undirected graph
// (Dijkstra-based PLL), then conversion of the raw labels into tokens.
CPL_W(WGraph &wgraph, Ordering &orders, bool slevel) {
  SECOND_LEVEL = slevel;
  iteration_generated.resize(numOfVertices);
  pruning_power.resize(numOfVertices);
  vector<index_t>& index_ = labels.index_;
  vector<NodeID> &inv = orders.inv;   // rank -> original vertex id
  vector<NodeID> &rank = orders.rank; // original vertex id -> rank
  // vector<vector<NodeID> > &adj = wgraph.adj;
  // vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;
  // CSR-style adjacency arrays of the weighted graph.
  vector<EdgeID>& vertices = wgraph.vertices;
  vector<NodeEdgeWeightPair>& edges = wgraph.edges;
  vector<bool> usd(numOfVertices, false); // usd[r]: r already used as a root
  // Per-vertex label under construction: parallel (hub id, distance) arrays,
  // terminated by a (numOfVertices, INF_WEIGHT) sentinel entry.
  vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT)));
  // Parallel structure recording, per label entry, the root that (re)confirmed
  // it on a shortest path (numOfVertices = unset).
  vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices)));
  vector<bool> vis(numOfVertices);
  // dst_r[w]: distance from current root r to hub w (INF if w not in r's label).
  vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
  queue<NodeID> visited_que;
  vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
  benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
  long pop = 0;     // total heap pops (statistics only)
  double hsize = 0; // accumulated max heap sizes (statistics only)
  // One pruned Dijkstra per root r, in rank order.
  for (size_t r = 0; r < numOfVertices; ++r) {
    if (usd[r]) continue;
    // Load r's current label into dst_r for O(1) pruning queries.
    const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
    for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
      dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
    }
    pqueue.update(r, 0);
    //vis[r] = true;
    long max_heap_size = 0;
    long heap_size = 1;
    while (!pqueue.empty()) {
      pop++;
      heap_size--;
      NodeID v;
      EdgeWeight v_d;
      pqueue.extract_min(v, v_d);
      pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
      pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v];
      vis[v] = true;
      visited_que.push(v);
      if (usd[v]) continue;
      // Pruning test: if some existing hub w already certifies
      // d(r,v) <= v_d via d(v,w) + d(r,w), do not label v from r.
      for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
        NodeID w = tmp_idx_v.first[i];
        EdgeWeight td = tmp_idx_v.second[i] + dst_r[w];
        // Record r as token parent of entry i when the entry lies on a
        // shortest r-v path and no parent was set yet.
        if(tmp_idx_v.second[i] == v_d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){
          tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size();
          tmp_idx_token_parents_v.second[i] = dst_r[w];
        }
        if (td <= v_d) {
          pruning_power[w]++;
          goto pruned;
        }
      }
      // Traverse
      // Not pruned: append (r, v_d) to v's label (overwrite sentinel, push new one).
      tmp_idx_v.first.back() = r;
      tmp_idx_v.second.back() = v_d;
      tmp_idx_v.first.push_back(numOfVertices);
      tmp_idx_v.second.push_back(INF_WEIGHT);
      tmp_idx_token_parents_v.first.back() = numOfVertices;
      tmp_idx_token_parents_v.second.back() = INF_WEIGHT;
      tmp_idx_token_parents_v.first.push_back(numOfVertices);
      tmp_idx_token_parents_v.second.push_back(INF_WEIGHT);
      iteration_generated[r]++;
      // for (size_t i = 0; i < adj[v].size(); ++i) {
      // NodeID w = adj[v][i];
      // EdgeWeight w_d = adj_weight[v][i] + v_d;
      // Relax v's outgoing edges.
      for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) {
        NodeID w = edges[eid].first;
        EdgeWeight w_d = edges[eid].second + v_d;
        if (!vis[w]) {
          if (distances[w] == INF_WEIGHT) { // first time in the heap
            heap_size++;
            if (max_heap_size < heap_size) max_heap_size = heap_size;
          }
          if( distances[w] > w_d ){
            pqueue.update(w, w_d);
            distances[w] = w_d;
          }
        }
      }
      pruned: {}
    }
    hsize = hsize + max_heap_size;
    // Reset per-root search state touched during this Dijkstra.
    while (!visited_que.empty()) {
      NodeID vis_v = visited_que.front();
      visited_que.pop();
      vis[vis_v] = false;
      distances[vis_v] = INF_WEIGHT;
      pqueue.clear(vis_v);
    }
    pqueue.clear_n();
    for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
      dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
    usd[r] = true;
  }
  //cout << "total pop:" << pop << endl;
  //cout << "heap size:" << (double)hsize / (double)numOfVertices << endl;
  converttokens(tmp_idx, tmp_idx_token_parents, false);
  // Remap anchor pointers from rank space back to original vertex ids.
  vector<NodeID> change_anchor(numOfVertices);
  for (size_t v = 0; v < numOfVertices; ++v) {
    change_anchor[inv[v]] = clabels.anchor_p[v];
  }
  for (size_t v = 0; v < numOfVertices; ++v) {
    clabels.anchor_p[v] = change_anchor[v];
  }
  /* double count = 0; for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); count = count + k - 1; index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } cout << "Average Label Size:" << count / numOfVertices << endl; */
}

// Constructor: directed variant (D_FLAGS distinguishes the overload) — runs a
// forward and a backward pruned Dijkstra per root over the directed graph.
CPL_W(WGraph &wgraph, Ordering &orders, bool slevel, bool D_FLAGS) {
  SECOND_LEVEL = slevel;
  iteration_generated.resize(numOfVertices);
  pruning_power.resize(numOfVertices);
  vector<index_t>& index_ = dlabels.index_;
  vector<NodeID> &inv = orders.inv;   // rank -> original vertex id
  vector<NodeID> &rank = orders.rank; // original vertex id -> rank
  /*vector<vector<NodeID> > &adj = wgraph.adj; vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;*/
  vector<index_t>& bindex_ = dlabels.bindex_;/* vector<vector<NodeID> > &r_adj = wgraph.r_adj; vector<vector<EdgeWeight> > &r_adj_weight = wgraph.r_adj_weight;*/
  //Array Representation
  // CSR adjacency of the forward graph and of the reversed graph.
  vector<EdgeID>& vertices = wgraph.vertices;
  vector<EdgeID>& r_vertices = wgraph.r_vertices;
  vector<NodeEdgeWeightPair>& edges = wgraph.edges;
  vector<NodeEdgeWeightPair>& r_edges = wgraph.r_edges;
  vector<bool> usd(numOfVertices, false);
  // Forward and backward labels, each sentinel-terminated (see undirected variant).
  vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT)));
  vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT)));
  vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices)));
  vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices)));
  vector<bool> vis(numOfVertices);
  // dst_r / r_dst_r: distances from/to the current root via its forward/backward label.
  vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
  vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT);
  queue<NodeID> visited_que;
  vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
  benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
  // Forward search from r.
  for (size_t r = 0; r < numOfVertices; ++r) {
    if (usd[r]) continue;
    // Load r's forward and backward labels for O(1) pruning lookups.
    const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
    for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
      dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
    }
    const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r];
    for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) {
      r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i];
    }
    pqueue.update(r, 0);
    //vis[r] = true;
    // Forward Dijkstra from r extends the BACKWARD labels of reached vertices
    // (a forward path r->v means r is a hub for queries ending at v).
    while (!pqueue.empty()) {
      NodeID v;
      EdgeWeight v_d;
      pqueue.extract_min(v, v_d);
      pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v];
      pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_token_parents_v = r_tmp_idx_token_parents[v];
      vis[v] = true;
      visited_que.push(v);
      if (usd[v]) continue;
      for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) {
        NodeID w = r_tmp_idx_v.first[i];
        EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w];
        if(r_tmp_idx_v.second[i] == v_d + r_dst_r[w] && r_tmp_idx_token_parents_v.first[i] == numOfVertices){
          r_tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size();
          r_tmp_idx_token_parents_v.second[i] = r_dst_r[w];
        }
        if (td <= v_d) {
          pruning_power[w]++;
          goto pruned_forward;
        }
      }
      // Traverse
      r_tmp_idx_v.first.back() = r;
      r_tmp_idx_v.second.back() = v_d;
      r_tmp_idx_v.first.push_back(numOfVertices);
      r_tmp_idx_v.second.push_back(INF_WEIGHT);
      iteration_generated[r]++;
      r_tmp_idx_token_parents_v.first.back() = numOfVertices;
      r_tmp_idx_token_parents_v.second.back() = INF_WEIGHT;
      r_tmp_idx_token_parents_v.first.push_back(numOfVertices);
      r_tmp_idx_token_parents_v.second.push_back(INF_WEIGHT);
      /* for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i]; EdgeWeight w_d = adj_weight[v][i] + v_d;*/
      //Array Representation
      for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) {
        NodeID w = edges[eid].first;
        EdgeWeight w_d = edges[eid].second + v_d;
        if (!vis[w]) {
          if (distances[w] > w_d) {
            pqueue.update(w, w_d);
            distances[w] = w_d;
          }
        }
      }
      pruned_forward: {}
    }
    // Reset search state between the forward and backward passes.
    while (!visited_que.empty()) {
      NodeID vis_v = visited_que.front();
      visited_que.pop();
      vis[vis_v] = false;
      distances[vis_v] = INF_WEIGHT;
      //pqueue.clear(vis_v);
    }
    //pqueue.clear_n();
    // Backward search from r.
    pqueue.update(r, 0);
    // Backward Dijkstra (reverse edges) extends the FORWARD labels.
    while (!pqueue.empty()) {
      NodeID v;
      EdgeWeight v_d;
      pqueue.extract_min(v, v_d);
      pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
      pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v];
      vis[v] = true;
      visited_que.push(v);
      if (usd[v]) continue;
      for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
        NodeID w = tmp_idx_v.first[i];
        EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w];
        // NOTE(review): this parent test uses dst_r[w] while the pruning
        // distance above uses r_dst_r[w] — asymmetric with the forward pass;
        // confirm this is intentional.
        if(tmp_idx_v.second[i] == v_d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){
          tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size();
          tmp_idx_token_parents_v.second[i] = dst_r[w];
        }
        if (td <= v_d) {
          pruning_power[w]++;
          goto pruned_backward;
        }
      }
      // Traverse
      tmp_idx_v.first.back() = r;
      tmp_idx_v.second.back() = v_d;
      tmp_idx_v.first.push_back(numOfVertices);
      tmp_idx_v.second.push_back(INF_WEIGHT);
      tmp_idx_token_parents_v.first.back() = numOfVertices;
      tmp_idx_token_parents_v.second.back() = INF_WEIGHT;
      tmp_idx_token_parents_v.first.push_back(numOfVertices);
      tmp_idx_token_parents_v.second.push_back(INF_WEIGHT);
      iteration_generated[r]++;
      /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i]; EdgeWeight w_d = r_adj_weight[v][i] + v_d;*/
      //Array Representation
      for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) {
        NodeID w = r_edges[eid].first;
        EdgeWeight w_d = r_edges[eid].second + v_d;
        if (!vis[w]) {
          if (distances[w] > w_d) {
            pqueue.update(w, w_d);
            distances[w] = w_d;
          }
        }
      }
      pruned_backward: {}
    }
    while (!visited_que.empty()) {
      NodeID vis_v = visited_que.front();
      visited_que.pop();
      vis[vis_v] = false;
      distances[vis_v] = INF_WEIGHT;
      //pqueue.clear(vis_v);
    }
    // pqueue.clear_n();
    for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
      dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
    for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i)
      r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT;
    usd[r] = true;
  }
  // Tokenize forward and backward labels separately.
  converttokens(tmp_idx, tmp_idx_token_parents, false);
  //converttokens(tmp_idx, tmp_idx_token_parents, r_tmp_idx, r_tmp_idx_token_parents);
  cout << clabels.numOfTokens << " Tokens in total" << endl;
  cout << (double)children_size / (double) clabels.numOfTokens << " average children number" << endl;
  converttokens(r_tmp_idx, r_tmp_idx_token_parents, true);
  cout << clabels.r_numOfTokens << " Tokens in total" << endl;
  cout << (double)r_children_size / (double) clabels.r_numOfTokens << " average children number" << endl;
  // Remap anchor pointers (forward and reverse) from rank space to original ids.
  vector<NodeID> change_anchor(numOfVertices);
  for (size_t v = 0; v < numOfVertices; ++v) {
    change_anchor[inv[v]] = clabels.anchor_p[v];
  }
  for (size_t v = 0; v < numOfVertices; ++v) {
    clabels.anchor_p[v] = change_anchor[v];
  }
  for (size_t v = 0; v < numOfVertices; ++v) {
    change_anchor[inv[v]] = clabels.r_anchor_p[v];
  }
  for (size_t v = 0; v < numOfVertices; ++v) {
    clabels.r_anchor_p[v] = change_anchor[v];
  }
  /* for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); bindex_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); }*/
}
};

// Bottom-up contraction-hierarchy style construction: contracts vertices in
// rank order and adds shortcuts found by bounded witness searches.
class Bottomup : public construction {
public:
  vector<double> iteration_generated;
  vector<double> pruning_power;
  // Bucket entry: (target vertex, distance) — used by the 2-hop witness search.
  typedef vector<pair<NodeID, EdgeWeight> > Bucket;
  vector<bool> contracted;
  vector<Bucket> mtmBucket;            // many-to-many buckets, indexed by vertex
  vector<EdgeWeight> possibleWitness;  // best known witness distance per target

  // Rewrite chgraph's adjacency (and reverse adjacency when directed) so that
  // vertex ids equal their contraction rank. Returns the total edge count seen.
  int relabelByOrder(CHGraph &chgraph, Ordering &orders) {
    int totalEdge = 0;
    vector<NodeID>& inv = orders.inv;
    vector<NodeID>& rank = orders.rank;
    for (NodeID v = 0; v < numOfVertices; ++v)
      rank[inv[v]] = v;
    vector<vector<CHGraph::CH_Edge> > new_adj(numOfVertices);
    vector<vector<CHGraph::CH_Edge> > new_r_adj(numOfVertices);
    //for (int i = 0; i < numOfVertices; ++i) {
    // for (int j = 0; j < chgraph.adj[i].size(); ++j) {
    // if (j != chgraph.adj[i].size() - 1)
    // if (chgraph.adj[i][j].target == chgraph.adj[i][j + 1].target) {
    // cout << i << " hahaah:" << chgraph.adj[i][j].weight << "," << chgraph.adj[i][j + 1].weight << endl;
    // }
    // }
    //}
    // Rebuild every edge with both endpoints mapped through rank[]; the level
    // field is reset to 0.
    for (NodeID v = 0; v < numOfVertices; ++v) {
      for (NodeID i = 0; i < chgraph.adj[v].size(); ++i)
        new_adj[rank[v]].push_back(CHGraph::CH_Edge(rank[chgraph.adj[v][i].target], 0, chgraph.adj[v][i].weight));
      totalEdge += chgraph.adj[v].size();
      if (DIRECTED_FLAG == true) {
        totalEdge += chgraph.r_adj[v].size();
        for (NodeID i = 0; i < chgraph.r_adj[v].size(); ++i)
          new_r_adj[rank[v]].push_back(CHGraph::CH_Edge(rank[chgraph.r_adj[v][i].target], 0, chgraph.r_adj[v][i].weight));
      }
    }
    chgraph.adj.swap(new_adj);
    if (DIRECTED_FLAG == true) {
      chgraph.r_adj.swap(new_r_adj);
    }
    // NOTE(review): only r_adj is re-sorted here; forward adj stays in rebuilt
    // order — confirm downstream code does not rely on sorted forward lists.
    for (int i = 0; i < numOfVertices; ++i) {
      if (DIRECTED_FLAG == true) {
        sort(chgraph.r_adj[i].begin(), chgraph.r_adj[i].end());
      }
    }
    new_adj.clear();
    if (DIRECTED_FLAG == true) {
      new_r_adj.clear();
    }
    return totalEdge;
  }

  // Witness search for contracting vertex v (undirected: in- and out-edges are
  // the same list). Returns the net change in edge count after adding the
  // required shortcuts. hopLimitsParameter selects the strategy:
  // 1 = 1-hop check, 2 = bucket-based 2-hop check, >2 = local Dijkstra.
  // Vertices with id < v are already contracted and must be skipped.
  int witness_search(NodeID v, CHGraph& chgraph, const int hopLimitsParameter, Ordering& orders, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<bool>& hitTarget) {
    int addShortcuts = 0;
    vector<CHGraph::CH_Edge>& inEdges = chgraph.adj[v];
    vector<CHGraph::CH_Edge>& outEdges = chgraph.adj[v];
    vector<pair<NodeID,
    CHGraph::CH_Edge> > possibleShortcuts; // (fromNode, shortcut edge) candidates
    // 1-hop witness search
    if (hopLimitsParameter == 1) {
      for (int i = 0; i < inEdges.size(); ++i) {
        //if (inEdges[i].level == V) continue;
        NodeID inNode = inEdges[i].target;
        if (inNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight inWeight = inEdges[i].weight;
        vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode];
        for (int j = 0; j < outEdges.size(); ++j) {
          //if (outEdges[j].level == V) continue;
          NodeID outNode = outEdges[j].target;
          if (outNode > v ) continue; // Skip the nodes that have been already contracted.
          if (outNode >= inNode) continue; // For undirected case, only test each pair once and we also skip the case inNode == outNode
          EdgeWeight outWeight = outEdges[j].weight;
          EdgeWeight walkThroughWeight = inWeight + outWeight; // Distance for the path (inNode - v - outNode).
          bool foundWitness = false;
          // Look for a direct edge inNode -> outNode no longer than the path through v.
          for (int k = 0; k < outEdgesOfInNode.size(); ++k) {
            NodeID outNeighborOfInNode = outEdgesOfInNode[k].target;
            // if (outNeighborLevelOfInNode == V) continue;
            if (outNeighborOfInNode >= v) continue; // Skip the nodes that have been already contracted.
            if (outNeighborOfInNode == outNode) {
              EdgeWeight outNeighborWeightOfInNode = outEdgesOfInNode[k].weight; // Distance for the direct path (inNode - outNode).
              if (outNeighborWeightOfInNode <= walkThroughWeight) {
                foundWitness = true;
                //walkThroughWeight = outNeighborWeightOfInNode;
              }
              break;
            }
          }
          if (foundWitness == false) {
            possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight)));
          }
        }
      }
    }
    // 2-hop witness search.
    if (hopLimitsParameter == 2) {
      // Init the many-to-many bucket.
      // For each uncontracted outNode, put (outNode, weight) into the bucket of
      // each of outNode's uncontracted in-neighbors.
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight outWeight = outEdges[j].weight;
        vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.adj[outNode];
        for (int k = 0; k < inEdgesOfOutNode.size(); ++k) {
          NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target;
          NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level;
          EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight;
          if (inNeighborOfOutNode >= v) continue; // Skip the nodes that have been already contracted. And skip the current contracted node (this is important).
          mtmBucket[inNeighborOfOutNode].push_back(make_pair(outNode, inNeighborWeightOfOutNode));
        }
      }
      // Finish the init.
      for (int i = 0; i < inEdges.size(); ++i) {
        NodeID inNode = inEdges[i].target;
        if (inNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight inWeight = inEdges[i].weight;
        vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode];
        // One-hop witness search from the bucket of inNode.
        Bucket& bucketInNode = mtmBucket[inNode];
        for (int k = 0; k < bucketInNode.size(); ++k) {
          NodeID target = bucketInNode[k].first;
          EdgeWeight targetWeight = bucketInNode[k].second;
          if (target >= inNode) continue;
          if (possibleWitness[target] > targetWeight)
            possibleWitness[target] = targetWeight;
        }
        // Two-hop witness search from inNode.
        // 1-hop forward search from inNode and scan the buckets of the reached nodes to find the length of 2-hop witness.
        for (int k = 0; k < outEdgesOfInNode.size(); ++k) {
          NodeID reachNode = outEdgesOfInNode[k].target;
          if (reachNode >= v) continue;// Skip the nodes that have been already contracted.
          NodeID reachNodeLevel = outEdgesOfInNode[k].level;
          EdgeWeight reachNodeWeight = outEdgesOfInNode[k].weight;
          Bucket& bucketReachNode = mtmBucket[reachNode];
          for (int q = 0; q < bucketReachNode.size(); ++q) {
            NodeID target = bucketReachNode[q].first;
            if (target >= inNode) continue;
            EdgeWeight newTargetWeight = bucketReachNode[q].second + reachNodeWeight;
            if (possibleWitness[target] > newTargetWeight)
              possibleWitness[target] = newTargetWeight;
          }
        }
        // Scan the outNode of v, to check whether shortcuts are needed for (inNode - v - outNode).
        // possibleWitness entries are reset to INF as they are consumed.
        for (int j = 0; j < outEdges.size(); ++j) {
          NodeID outNode = outEdges[j].target;
          if (outNode > v) { possibleWitness[outNode] = INF_WEIGHT; continue; }
          if (outNode >= inNode) { possibleWitness[outNode] = INF_WEIGHT; continue; }// undirected, scan each pair only once.
          EdgeWeight outWeight = outEdges[j].weight;
          EdgeWeight witnessWeight = possibleWitness[outNode];
          possibleWitness[outNode] = INF_WEIGHT;
          EdgeWeight walkThroughWeight = inWeight + outWeight;
          if(witnessWeight > walkThroughWeight)
            possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight)));
        }
      }
      // Cleanup the many-to-many bucket.
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode > v) continue;
        EdgeWeight outWeight = outEdges[j].weight;
        vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.adj[outNode];
        for (int k = 0; k < inEdgesOfOutNode.size(); ++k) {
          NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target;
          NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level;
          EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight;
          if (inNeighborOfOutNode >= v) continue;
          if(!mtmBucket[inNeighborOfOutNode].empty())
            mtmBucket[inNeighborOfOutNode].clear();
        }
      }
    }
    // Dijkstra Local Search.
    if (hopLimitsParameter > 2) {
      // Init the target table.
      int noOfTarget = 0;
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode >= v) continue; // Skip the nodes that have been already contracted.
        hitTarget[outNode] = true;
        noOfTarget++;
      }
      // Loop the incoming node for witness search.
      for (int i = 0; i < inEdges.size(); ++i) {
        //if (inEdges[i].level == V) continue;
        NodeID inNode = inEdges[i].target;
        int visitedTarget = 0;
        if (inNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight inWeight = inEdges[i].weight;
        vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode];
        // Dijkstra from inNode, restricted to uncontracted vertices; stops once
        // all targets (v's out-neighbors) have been settled.
        pqueue.update(inNode, 0);
        visited_set.insert(inNode);
        distances[inNode] = 0;
        while (!pqueue.empty()) {
          NodeID u;
          EdgeWeight u_d;
          pqueue.extract_min(u, u_d);
          vis[u] = true;
          if (hitTarget[u] == true)
            visitedTarget++;
          if (visitedTarget == noOfTarget)
            break;
          for (int j = 0; j < chgraph.adj[u].size(); ++j) {
            NodeID w = chgraph.adj[u][j].target;
            EdgeWeight w_d = chgraph.adj[u][j].weight + u_d;
            if (w >= v) continue; // Can not visit contracted nodes.
            if (!vis[w]) {
              if (distances[w] > w_d) {
                pqueue.update(w, w_d);
                distances[w] = w_d;
                visited_set.insert(w);
              }
            }
          }
        }
        // Test witness for all outNode.
        for (int j = 0; j < outEdges.size(); ++j) {
          NodeID outNode = outEdges[j].target;
          if (outNode >= v) continue;
          if (outNode >= inNode) continue; // undirected, scan each pair only once.
          EdgeWeight outWeight = outEdges[j].weight;
          EdgeWeight walkThroughWeight = inWeight + outWeight;
          if (distances[outNode] > walkThroughWeight) {
            possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight)));
          }
        }
        // Clean up the dijkstra structures otherwise the trash will manipulate the next inNode's dijkstra search.
        for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) {
          NodeID cv = *it;
          vis[cv] = false;
          distances[cv] = INF_WEIGHT;
        }
        while (!pqueue.empty()) {
          NodeID tmpv;
          EdgeWeight tmpweight;
          pqueue.extract_min(tmpv, tmpweight);
        }
        visited_set.clear();
      }
      // Clean the target table for the next contracted node.
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode >= v) continue; // Skip the nodes that have been already contracted.
        hitTarget[outNode] = false;
      }
    }
    // Append the shortcuts.
    // Each shortcut is inserted in both endpoints' lists (undirected); if an
    // edge already exists it is updated in place when the shortcut is shorter,
    // and the counter is adjusted so only genuinely new edges are counted.
    for (int i = 0; i < possibleShortcuts.size(); ++i) {
      NodeID fromNode = possibleShortcuts[i].first;
      NodeID toNode = possibleShortcuts[i].second.target;
      NodeID level = possibleShortcuts[i].second.level;
      EdgeWeight weight = possibleShortcuts[i].second.weight;
      addShortcuts++;
      addShortcuts++;
      int fromAdjSize = chgraph.adj[fromNode].size();
      bool skipfrom = false;
      // Scan backwards: recently appended shortcuts are near the end.
      for (int j = fromAdjSize - 1; j + 1 > 0; --j) {
        if (chgraph.adj[fromNode][j].target == toNode) {
          if (weight > chgraph.adj[fromNode][j].weight) break;
          chgraph.adj[fromNode][j].weight = weight;
          chgraph.adj[fromNode][j].level = level;
          skipfrom = true;
          addShortcuts--;
          break;
        }
      }
      if (!skipfrom) {
        chgraph.adj[fromNode].push_back(CHGraph::CH_Edge(toNode, level, weight));
      }
      int toAdjSize = chgraph.adj[toNode].size();
      bool skipto = false;
      for (int j = toAdjSize - 1; j + 1 > 0; --j) {
        if (chgraph.adj[toNode][j].target == fromNode) {
          if (weight > chgraph.adj[toNode][j].weight) break;
          chgraph.adj[toNode][j].weight = weight;
          chgraph.adj[toNode][j].level = level;
          skipto = true;
          addShortcuts--;
          break;
        }
      }
      if (!skipto)
        chgraph.adj[toNode].push_back(CHGraph::CH_Edge(fromNode, level, weight));
    }
    // Removing v also removes its incident edges from the graph.
    addShortcuts -= chgraph.adj[v].size(); // Two times because it will appear in others' adj.
    possibleShortcuts.clear();
    return addShortcuts;
  }

  // Directed counterpart of witness_search: in-edges come from r_adj, out-edges
  // from adj, and every ordered pair (inNode, outNode) with inNode != outNode is
  // tested. Returns the net change in edge count for contracting v.
  int witness_search_directed(NodeID v, CHGraph& chgraph, const int hopLimitsParameter, Ordering& orders, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<bool>& hitTarget) {
    int addShortcuts = 0;
    vector<CHGraph::CH_Edge>& inEdges = chgraph.r_adj[v];
    vector<CHGraph::CH_Edge>& outEdges = chgraph.adj[v];
    vector<pair<NodeID, CHGraph::CH_Edge> > possibleShortcuts;
    // 1-hop witness search
    if (hopLimitsParameter == 1) {
      for (int i = 0; i < inEdges.size(); ++i) {
        //if (inEdges[i].level == V) continue;
        NodeID inNode = inEdges[i].target;
        if (inNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight inWeight = inEdges[i].weight;
        vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode];
        for (int j = 0; j < outEdges.size(); ++j) {
          //if (outEdges[j].level == V) continue;
          NodeID outNode = outEdges[j].target;
          if (outNode >= v) continue; // Skip the nodes that have been already contracted.
          if (outNode == inNode) continue;
          EdgeWeight outWeight = outEdges[j].weight;
          EdgeWeight walkThroughWeight = inWeight + outWeight; // Distance for the path (inNode - v - outNode).
          bool foundWitness = false;
          // Look for a direct edge inNode -> outNode no longer than the path through v.
          for (int k = 0; k < outEdgesOfInNode.size(); ++k) {
            NodeID outNeighborOfInNode = outEdgesOfInNode[k].target;
            // if (outNeighborLevelOfInNode == V) continue;
            if (outNeighborOfInNode >= v) continue; // Skip the nodes that have been already contracted.
            if (outNeighborOfInNode == outNode) {
              EdgeWeight outNeighborWeightOfInNode = outEdgesOfInNode[k].weight; // Distance for the direct path (inNode - outNode).
              if (outNeighborWeightOfInNode <= walkThroughWeight) {
                foundWitness = true;
                //walkThroughWeight = outNeighborWeightOfInNode;
              }
              break;
            }
          }
          if (foundWitness == false) {
            possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight)));
          }
        }
      }
    }
    // 2-hop witness search.
    if (hopLimitsParameter == 2) {
      // Init the many-to-many bucket.
      // For each uncontracted outNode, put (outNode, weight) into the bucket of
      // each of outNode's uncontracted in-neighbors (reverse adjacency).
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight outWeight = outEdges[j].weight;
        vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.r_adj[outNode];
        for (int k = 0; k < inEdgesOfOutNode.size(); ++k) {
          NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target;
          NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level;
          EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight;
          if (inNeighborOfOutNode >= v) continue; // Skip the nodes that have been already contracted. And skip the current contracted node (this is important).
          mtmBucket[inNeighborOfOutNode].push_back(make_pair(outNode, inNeighborWeightOfOutNode));
        }
      }
      // Finish the init.
      for (int i = 0; i < inEdges.size(); ++i) {
        NodeID inNode = inEdges[i].target;
        if (inNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight inWeight = inEdges[i].weight;
        vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode];
        // One-hop witness search from the bucket of inNode.
        Bucket& bucketInNode = mtmBucket[inNode];
        for (int k = 0; k < bucketInNode.size(); ++k) {
          NodeID target = bucketInNode[k].first;
          EdgeWeight targetWeight = bucketInNode[k].second;
          //if (target >= inNode) continue;
          if (possibleWitness[target] > targetWeight)
            possibleWitness[target] = targetWeight;
        }
        // Two-hop witness search from inNode.
        // 1-hop forward search from inNode and scan the buckets of the reached nodes to find the length of 2-hop witness.
        for (int k = 0; k < outEdgesOfInNode.size(); ++k) {
          NodeID reachNode = outEdgesOfInNode[k].target;
          if (reachNode >= v) continue;// Skip the nodes that have been already contracted.
          NodeID reachNodeLevel = outEdgesOfInNode[k].level;
          EdgeWeight reachNodeWeight = outEdgesOfInNode[k].weight;
          Bucket& bucketReachNode = mtmBucket[reachNode];
          for (int q = 0; q < bucketReachNode.size(); ++q) {
            NodeID target = bucketReachNode[q].first;
            // if (target >= inNode) continue;
            EdgeWeight newTargetWeight = bucketReachNode[q].second + reachNodeWeight;
            if (possibleWitness[target] > newTargetWeight)
              possibleWitness[target] = newTargetWeight;
          }
        }
        // Scan the outNode of v, to check whether shortcuts are needed for (inNode - v - outNode).
        // possibleWitness entries are reset to INF as they are consumed.
        for (int j = 0; j < outEdges.size(); ++j) {
          NodeID outNode = outEdges[j].target;
          if (outNode > v) { possibleWitness[outNode] = INF_WEIGHT; continue; }
          if (outNode == inNode) { possibleWitness[outNode] = INF_WEIGHT; continue; }
          EdgeWeight outWeight = outEdges[j].weight;
          EdgeWeight witnessWeight = possibleWitness[outNode];
          possibleWitness[outNode] = INF_WEIGHT;
          EdgeWeight walkThroughWeight = inWeight + outWeight;
          if (witnessWeight > walkThroughWeight)
            possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight)));
        }
      }
      // Cleanup the many-to-many bucket.
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode > v) continue;
        EdgeWeight outWeight = outEdges[j].weight;
        vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.r_adj[outNode];
        for (int k = 0; k < inEdgesOfOutNode.size(); ++k) {
          NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target;
          NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level;
          EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight;
          if (inNeighborOfOutNode >= v) continue;
          if (!mtmBucket[inNeighborOfOutNode].empty())
            mtmBucket[inNeighborOfOutNode].clear();
        }
      }
    }
    // Dijkstra Local Search.
    if (hopLimitsParameter > 2) {
      // Init the target table.
      int noOfTarget = 0;
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode >= v) continue; // Skip the nodes that have been already contracted.
        hitTarget[outNode] = true;
        noOfTarget++;
      }
      // Loop the incoming node for witness search.
      for (int i = 0; i < inEdges.size(); ++i) {
        //if (inEdges[i].level == V) continue;
        NodeID inNode = inEdges[i].target;
        int visitedTarget = 0;
        if (inNode >= v) continue; // Skip the nodes that have been already contracted.
        EdgeWeight inWeight = inEdges[i].weight;
        vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode];
        // Forward Dijkstra from inNode over uncontracted vertices; stops once
        // all of v's out-neighbors are settled.
        pqueue.update(inNode, 0);
        visited_set.insert(inNode);
        distances[inNode] = 0;
        while (!pqueue.empty()) {
          NodeID u;
          EdgeWeight u_d;
          pqueue.extract_min(u, u_d);
          vis[u] = true;
          if (hitTarget[u] == true)
            visitedTarget++;
          if (visitedTarget == noOfTarget)
            break;
          for (int j = 0; j < chgraph.adj[u].size(); ++j) {
            NodeID w = chgraph.adj[u][j].target;
            EdgeWeight w_d = chgraph.adj[u][j].weight + u_d;
            if (w >= v) continue; // Can not visit contracted nodes.
            if (!vis[w]) {
              if (distances[w] > w_d) {
                pqueue.update(w, w_d);
                distances[w] = w_d;
                visited_set.insert(w);
              }
            }
          }
        }
        // Test witness for all outNode.
        for (int j = 0; j < outEdges.size(); ++j) {
          NodeID outNode = outEdges[j].target;
          if (outNode >= v) continue;
          if (outNode == inNode) continue;
          EdgeWeight outWeight = outEdges[j].weight;
          EdgeWeight walkThroughWeight = inWeight + outWeight;
          if (distances[outNode] > walkThroughWeight) {
            possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight)));
          }
        }
        // Clean up the dijkstra structures otherwise the trash will manipulate the next inNode's dijkstra search.
        for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) {
          NodeID cv = *it;
          vis[cv] = false;
          distances[cv] = INF_WEIGHT;
        }
        while (!pqueue.empty()) {
          NodeID tmpv;
          EdgeWeight tmpweight;
          pqueue.extract_min(tmpv, tmpweight);
        }
        visited_set.clear();
      }
      // Clean the target table for the next contracted node.
      for (int j = 0; j < outEdges.size(); ++j) {
        NodeID outNode = outEdges[j].target;
        if (outNode >= v) continue; // Skip the nodes that have been already contracted.
        hitTarget[outNode] = false;
      }
    }
    // Append the shortcuts.
    // Directed: insert into fromNode's forward list and toNode's reverse list;
    // existing edges are updated in place when the shortcut is shorter.
    for (int i = 0; i < possibleShortcuts.size(); ++i) {
      addShortcuts += 2;
      NodeID fromNode = possibleShortcuts[i].first;
      NodeID toNode = possibleShortcuts[i].second.target;
      NodeID level = possibleShortcuts[i].second.level;
      EdgeWeight weight = possibleShortcuts[i].second.weight;
      int fromAdjSize = chgraph.adj[fromNode].size();
      bool skipfrom = false;
      // Scan backwards: recently appended shortcuts are near the end.
      for (int j = fromAdjSize - 1; j + 1 > 0; --j) {
        if (chgraph.adj[fromNode][j].target == toNode) {
          if (weight > chgraph.adj[fromNode][j].weight) break;
          chgraph.adj[fromNode][j].weight = weight;
          chgraph.adj[fromNode][j].level = level;
          skipfrom = true;
          addShortcuts--;
          break;
        }
      }
      if (!skipfrom) {
        chgraph.adj[fromNode].push_back(CHGraph::CH_Edge(toNode, level, weight));
      }
      int toAdjSize = chgraph.r_adj[toNode].size();
      bool skipto = false;
      for (int j = toAdjSize - 1; j + 1 > 0; --j) {
        if (chgraph.r_adj[toNode][j].target == fromNode) {
          if (weight > chgraph.r_adj[toNode][j].weight) break;
          chgraph.r_adj[toNode][j].weight = weight;
          chgraph.r_adj[toNode][j].level = level;
          skipto = true;
          addShortcuts--;
          break;
        }
      }
      if (!skipto)
        chgraph.r_adj[toNode].push_back(CHGraph::CH_Edge(fromNode, level, weight));
    }
    // Removing v also removes its incident in/out edges.
    addShortcuts -= 2 * chgraph.adj[v].size(); // 2 times because these out (in) edges will appear in others' in (out) edges.
addShortcuts -= 2 * chgraph.r_adj[v].size(); // doubled: v's in-edges also appear in its neighbours' out-edge lists
possibleShortcuts.clear();
return addShortcuts;
}

// Builds the shortcuts that start at `source` with a pruned Dijkstra search over
// `adj` (weighted graphs).  Ranks: a smaller NodeID means a HIGHER rank, so every
// vertex w < source outranks source.
//
// Scratch arrays (must hold their reset values on entry, and are restored before
// returning): distances[] == INF_WEIGHT, vis[] == false,
// max_parents[] == numOfVertices, isNeighbor[] == false, waitForPop[] == false.
//
// max_parents[w] records the first higher-ranked vertex met along the current
// shortest path source->w (numOfVertices means "not covered yet").  A shortcut
// (source -> w, distance) is appended to shortcuts[source] when w itself is that
// first higher-ranked vertex and w is not a direct neighbour of source.
// The counters implement two early exits:
//   in_que                     = vertices currently queued,
//   in_que_cover               = queued vertices whose path is already covered,
//   in_que_cover_wait_for_pop  = queued "first-cover" vertices not yet popped
//                                (-1 until the first one is seen).
// Always returns 0.
int build_shortcuts_dij(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<bool>& waitForPop, vector<vector<CHGraph::CH_Edge> >& shortcuts) {
	if (source == 0) return 0; // The most important vertex needs no shortcuts.
	int uncover_count = source; // #vertices ranked higher than source still unvisited.
	pqueue.update(source, 0);
	distances[source] = 0;
	visited_set.insert(source);
	int in_que_cover_wait_for_pop = -1;
	int in_que_cover = 0;
	int in_que = 1;
	while (!pqueue.empty()) {
		NodeID v;
		EdgeWeight v_d;
		pqueue.extract_min(v, v_d);
		in_que--;
		vis[v] = true;
		if (max_parents[v] != numOfVertices) {
			// A higher-ranked vertex already lies on the shortest path to v.
			if (max_parents[v] == v && isNeighbor[v] == false) {
				// v itself is the first higher-ranked vertex and is not a direct
				// neighbour of source: record the shortcut source -> v.
				shortcuts[source].push_back(CHGraph::CH_Edge(v, source, v_d));
				in_que_cover_wait_for_pop--;
			}
			else if (max_parents[v] == v && isNeighbor[v] == true) {
				in_que_cover_wait_for_pop--;
			}
			if (v < source)
				uncover_count--;
			if (uncover_count == 0) {
				// All vertices ranked higher than source have been visited.
				break;
			}
			in_que_cover--;
		}
		for (int i = 0; i < adj[v].size(); ++i) {
			NodeID w = adj[v][i].target;
			EdgeWeight w_d = adj[v][i].weight + v_d;
			if (!vis[w]) {
				// First time w is inserted into the queue.
				if (distances[w] == INF_WEIGHT && w_d < distances[w]) {
					in_que++;
					visited_set.insert(w);
				}
				if (distances[w] > w_d) {
					distances[w] = w_d;
					pqueue.update(w, w_d);
					if (v == source)
						isNeighbor[w] = true;
					int uncover_flag = false; // NOTE(review): declared int, used as a bool.
					if (max_parents[w] == numOfVertices)
						uncover_flag = true;
					// The shortest path now reaches w via an intermediate vertex
					// rather than directly from source.
					if (isNeighbor[w] == true && v != source) {
						isNeighbor[w] = false;
					}
					// w has not been covered yet.
					if (max_parents[w] == numOfVertices) {
						// w is the first higher-ranked vertex on this path.
						if (w < source && max_parents[v] == numOfVertices) {
							if (waitForPop[w] == false) {
								if (in_que_cover_wait_for_pop == -1)
									in_que_cover_wait_for_pop = 0;
								in_que_cover_wait_for_pop++;
								waitForPop[w] = true;
							}
							max_parents[w] = w;
						}
						// The path to w was already covered via max_parents[v].
						if (max_parents[v] != numOfVertices) {
							if (waitForPop[w] == true) {
								if (in_que_cover_wait_for_pop == -1)
									in_que_cover_wait_for_pop = 0;
								else
									in_que_cover_wait_for_pop--;
								waitForPop[w] = false;
							}
							max_parents[w] = max_parents[v];
						}
						// w has just been covered for the first time.
						if (uncover_flag == true && max_parents[w] != numOfVertices)
							in_que_cover++;
					}
					else { // max_parents[w] != numOfVertices: w was covered before.
						// The new, shorter path comes through an uncovered v.
						if (max_parents[v] == numOfVertices) {
							if (w < source) { // w is the first higher-ranked vertex on this path.
								if (waitForPop[w] == false) {
									if (in_que_cover_wait_for_pop == -1)
										in_que_cover_wait_for_pop = 0;
									in_que_cover_wait_for_pop++;
									waitForPop[w] = true;
								}
								max_parents[w] = w;
							}
							else { // The shorter path is uncovered: drop w's cover information.
								max_parents[w] = numOfVertices;
								in_que_cover--;
							}
						}
						// w is now covered through v; if w was previously recorded as a
						// first-cover vertex, withdraw that record.
						if (max_parents[v] != numOfVertices) {
							if (max_parents[w] == w) {
								if (waitForPop[w] == true) {
									if (in_que_cover_wait_for_pop == -1)
										in_que_cover_wait_for_pop = 0;
									else
										in_que_cover_wait_for_pop--;
									waitForPop[w] = false;
								}
								max_parents[w] = max_parents[v];
							}
						}
					}
				}
			}
		}
		if (in_que == in_que_cover && v != source && in_que_cover_wait_for_pop != -1 && in_que_cover_wait_for_pop == 0) {
			// Everything still queued is covered and no first-cover vertex is
			// pending: no further shortcut can start at source.
			break;
		}
	}
	// Reset the scratch structures, otherwise leftovers would corrupt the next
	// vertex's search.
	for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) {
		NodeID cv = *it;
		vis[cv] = false;
		distances[cv] = INF_WEIGHT;
		max_parents[cv] = numOfVertices;
		isNeighbor[cv] = false;
		pqueue.clear(cv);
		waitForPop[cv] = false;
	}
	pqueue.clear_n();
	visited_set.clear();
	return 0;
}

// (Earlier commented-out draft of build_shortcuts_dij, without the
//  early-termination bookkeeping; the draft continues on the lines below.)
//int build_shortcuts_dij(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<vector<CHGraph::CH_Edge> >& shortcuts) {
//	int uncover_count = source;
//	pqueue.update(source, 0);
//	distances[source] = 0;
//	//isNeighbor[source] = true;
//	visited_set.insert(source);
//	while (!pqueue.empty()) {
//		NodeID v;
//		EdgeWeight v_d;
//		pqueue.extract_min(v, v_d);
//		vis[v] = true;
//		if (max_parents[v] != numOfVertices) { // Either one of the ancestors of v or v has higher rank than source, we can stop the search from here.
// if (max_parents[v] == v && isNeighbor[v] == false) // shortcuts[source].push_back(CHGraph::CH_Edge(v, source, v_d)); // if (v < source) // uncover_count--; // if (uncover_count == 0) // All vertices higher rank than source have been covered. // break; // continue; // } // for (int i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i].target; // EdgeWeight w_d = adj[v][i].weight + v_d; // if (!vis[w]) { // if (distances[w] > w_d) { // distances[w] = w_d; // pqueue.update(w, w_d); // visited_set.insert(w); // if (v == source) // isNeighbor[w] = true; // // w is the first vertex has higher rank than source. // if (w < source && max_parents[v] == numOfVertices) // max_parents[w] = w; // // w-source has already been hit by previous max_parents[v]. // if (max_parents[v] != numOfVertices) // max_parents[w] = max_parents[v]; // // The shorter path comes from u-x-v-w rather than u(v)-w. // if (isNeighbor[w] == true && v != source) { // isNeighbor[w] = false; // } // /* if (max_parents[w] > w) // max_parents[w] = w; // if(max_parents[w] > max_parents[v]) // max_parents[w] = max_parents[v];*/ // } // } // } // } // // Clean up the dijkstra structures otherwise the trash will manipulate the next inNode's dijkstra search. 
// for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) { // NodeID cv = *it; // vis[cv] = false; // distances[cv] = INF_WEIGHT; // max_parents[cv] = numOfVertices; // isNeighbor[cv] = false; // pqueue.clear(cv); // } // /* while (!pqueue.empty()) { // NodeID tmpv; // EdgeWeight tmpweight; // pqueue.extract_min(tmpv, tmpweight); // }*/ // pqueue.clear_n(); // visited_set.clear(); // return 0; //} //int build_shortcuts_bfs(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<vector<CHGraph::CH_Edge> >& shortcuts, vector<NodeID>& que) { // int uncover_count = source; // //vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // // NodeID que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = source; // vis[source] = true; // que_t1 = que_h; // for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { // for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { // NodeID v = que[que_i]; // if (max_parents[v] != numOfVertices) { // Either one of the ancestors of v or v has higher rank than source, we can stop the search from here. // // if (max_parents[v] == v && isNeighbor[v] == false) // shortcuts[source].push_back(CHGraph::CH_Edge(v, source, d)); // // if (v < source) // uncover_count--; // if (uncover_count == 0) { // All vertices higher rank than source have been covered. // break; // } // continue; // } // for (size_t i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i].target; // if (!vis[w]) { // vis[w] = true; // if (v == source) // isNeighbor[w] = true; // // w is the first vertex has higher rank than source. // if (w < source )//&& max_parents[v] == numOfVertices) // max_parents[w] = w; // //// w-source has already been hit by previous max_parents[v]. // //if (max_parents[v] != numOfVertices) { // // max_parents[w] = max_parents[v]; // // continue; // //} // // //// The shorter path comes from u-x-v-w rather than u(v)-w. 
*We wont have this case in unweigted graphs. // //if (isNeighbor[w] == true && v != source) { // // isNeighbor[w] = false; // //} // que[que_h++] = w; // } // } // pruned: // {} // } // que_t0 = que_t1; // que_t1 = que_h; // } // for (size_t i = 0; i < que_h; ++i) { // vis[que[i]] = false; // max_parents[que[i]] = numOfVertices; // isNeighbor[que[i]] = false; // } //} int build_shortcuts_bfs(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<vector<CHGraph::CH_Edge> >& shortcuts, vector<NodeID>& que) { int uncover_count = source; //vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = source; vis[source] = true; que_t1 = que_h; int in_que = 1; int in_que_cover = 0; int in_que_wait_for_pop = -1; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; in_que--; if (max_parents[v] != numOfVertices) { // Either one of the ancestors of v or v has higher rank than source, we can stop the search from here. if (max_parents[v] == v && isNeighbor[v] == false) { shortcuts[source].push_back(CHGraph::CH_Edge(v, source, d)); in_que_wait_for_pop--; } if(max_parents[v] == v && isNeighbor[v] == true) in_que_wait_for_pop--; if (v < source) uncover_count--; if (uncover_count == 0) { // All vertices higher rank than source have been covered. goto jumpout; } in_que_cover--; } for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i].target; if (!vis[w]) { vis[w] = true; in_que++; if (v == source) isNeighbor[w] = true; // w is the first vertex has higher rank than source. 
if (w < source && max_parents[v] == numOfVertices) {//&& max_parents[v] == numOfVertices) max_parents[w] = w; if (in_que_wait_for_pop == -1) in_que_wait_for_pop = 0; in_que_wait_for_pop++; in_que_cover++; } if (max_parents[v] != numOfVertices) { in_que_cover++; max_parents[w] = max_parents[v]; } //// w-source has already been hit by previous max_parents[v]. //if (max_parents[v] != numOfVertices) { // max_parents[w] = max_parents[v]; // continue; //} //// The shorter path comes from u-x-v-w rather than u(v)-w. *We wont have this case in unweigted graphs. //if (isNeighbor[w] == true && v != source) { // isNeighbor[w] = false; //} que[que_h++] = w; } } if (in_que == in_que_cover && in_que_wait_for_pop == 0) { goto jumpout; } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } jumpout: {} for (size_t i = 0; i < que_h; ++i) { vis[que[i]] = false; max_parents[que[i]] = numOfVertices; isNeighbor[que[i]] = false; } } void labeling(CHGraph &chgraph, Ordering &orders) { for (int i = 0; i < numOfVertices; ++i) { if(chgraph.adj[i].size() != 0) sort(chgraph.adj[i].begin(), chgraph.adj[i].end()); if (DIRECTED_FLAG == true) { if (chgraph.r_adj[i].size() != 0) sort(chgraph.r_adj[i].begin(), chgraph.r_adj[i].end()); } } vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>())); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>())); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); set<NodeID> appendCandidate; // vector<CHGraph::CH_Edge>& inEdges = chgraph.r_adj[v]; //vector<CHGraph::CH_Edge>& outEdges = chgraph.adj[v]; // Start to process every nodes. 
for (NodeID v = 0; v < numOfVertices; ++v) {
	vector<CHGraph::CH_Edge>& adj_v = chgraph.adj[v];
	pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
	if (DIRECTED_FLAG == false) {
		// Scan every neighbour u of v; only higher-ranked (already labelled)
		// neighbours u < v contribute.
		for (int i = 0; i < adj_v.size(); ++i) {
			NodeID u = adj_v[i].target;
			NodeID level = adj_v[i].level;
			if (u >= v) continue;
			EdgeWeight weight = adj_v[i].weight;
			// Propagate every existing label entry w of u: candidate distance
			// v -> u -> w.  Keep the minimum per w in dst_r[].
			pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_u = tmp_idx[u];
			for (int j = 0; j < tmp_idx_u.first.size(); ++j) {
				NodeID w = tmp_idx_u.first[j];
				EdgeWeight vuw_distance = tmp_idx_u.second[j] + weight; // Distance from v to u to w.
				if (dst_r[w] > vuw_distance) {
					if (dst_r[w] == INF_WEIGHT)
						appendCandidate.insert(w);
					dst_r[w] = vuw_distance;
				}
			}
		}
		// Post-prune the candidates with a query test, starting from the
		// highest-ranked candidate.  Takes O(|M|^2), |M| = average label size.
		for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
			NodeID w = *it;
			pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_w = tmp_idx[w];
			for (int k = 0; k < tmp_idx_w.first.size(); ++k) {
				NodeID labelOfW = tmp_idx_w.first[k];
				EdgeWeight distanceToIt = tmp_idx_w.second[k] + dst_r[labelOfW];
				if (dst_r[w] >= distanceToIt) {
					// Prune: the path to w is already answered via the
					// higher-ranked label vertex labelOfW.
					if (labelOfW != w)
						dst_r[w] = INF_WEIGHT;
					break;
				}
			}
		}
		// Append the surviving candidates to v's label and reset dst_r[].
		for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
			NodeID w = *it;
			if (dst_r[w] == INF_WEIGHT) continue;
			tmp_idx_v.first.push_back(w);
			tmp_idx_v.second.push_back(dst_r[w]);
			dst_r[w] = INF_WEIGHT;
		}
		appendCandidate.clear();
		// Every vertex carries itself with distance 0.
		tmp_idx_v.first.push_back(v);
		tmp_idx_v.second.push_back(0);
	}
}
if (DIRECTED_FLAG == false) {
	// Materialize the temporary labels into the final index (sentinel entry
	// numOfVertices/INF_WEIGHT terminates each label list).
	for (size_t v = 0; v < numOfVertices; ++v) {
		tmp_idx[v].first.push_back(numOfVertices);
		tmp_idx[v].second.push_back(INF_WEIGHT);
		NodeID k = tmp_idx[v].first.size();
		index_[inv[v]].spt_v.resize(k);
		index_[inv[v]].spt_d.resize(k);
		for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
		for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
		// swap with an empty vector to actually release the capacity.
		vector<NodeID>().swap(tmp_idx[v].first);
		vector<EdgeWeight>().swap(tmp_idx[v].second);
	}
}
}

// Top-down labeling: pruned Dijkstra from every vertex in rank order over the
// contracted graph (adjacency sorted in DESCENDING target order so the scan
// can stop at the first lower-ranked neighbour).  Fills `labels.index_`.
void td_labeling(CHGraph &chgraph, Ordering &orders) {
	for (int i = 0; i < numOfVertices; ++i) {
		if (chgraph.adj[i].size() != 0)
			sort(chgraph.adj[i].rbegin(), chgraph.adj[i].rend());
		if (chgraph.r_adj[i].size() != 0)
			sort(chgraph.r_adj[i].rbegin(), chgraph.r_adj[i].rend());
	}
	vector<index_t>& index_ = labels.index_;
	vector<NodeID> &inv = orders.inv;
	vector<NodeID> &rank = orders.rank;
	vector<pair<vector<NodeID>, vector<EdgeWeight> > >
		r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));
	vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT);
	set<NodeID> appendCandidate;
	vector<bool> usd(numOfVertices, false); // usd[r]: r already used as a Dijkstra root.
	// Each label starts with the sentinel pair (numOfVertices, INF_WEIGHT).
	vector<pair<vector<NodeID>, vector<EdgeWeight> > >
		tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices),
			vector<EdgeWeight>(1, INF_WEIGHT)));
	vector<bool> vis(numOfVertices);
	vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
	queue<NodeID> visited_que;
	vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
	benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
	long pop = 0;      // Total heap pops, for statistics only.
	double hsize = 0;  // Accumulated per-root max heap size, for statistics only.
	for (size_t r = 0; r < numOfVertices; ++r) {
		if (usd[r]) continue;
		// Load root r's current label into dst_r[] for O(1) pruning queries.
		const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r];
		for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) {
			dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i];
		}
		pqueue.update(r, 0);
		long max_heap_size = 0;
		long heap_size = 1;
		while (!pqueue.empty()) {
			pop++;
			heap_size--;
			NodeID v;
			EdgeWeight v_d;
			pqueue.extract_min(v, v_d);
			pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
			vis[v] = true;
			visited_que.push(v);
			vector<CHGraph::CH_Edge>& adj_v = chgraph.adj[v];
			if (usd[v]) continue;
			// Pruning test: if an existing label pair already answers
			// dist(r, v) <= v_d, do not add a label and do not expand v.
			for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) {
				NodeID w = tmp_idx_v.first[i];
				EdgeWeight td = tmp_idx_v.second[i] + dst_r[w];
				if (td <= v_d) {
					goto pruned;
				}
			}
			// Not pruned: overwrite the sentinel with (r, v_d) and re-append it.
			tmp_idx_v.first.back() = r;
			tmp_idx_v.second.back() = v_d;
			tmp_idx_v.first.push_back(numOfVertices);
			tmp_idx_v.second.push_back(INF_WEIGHT);
			iteration_generated[r]++;
			for (int i = 0; i < adj_v.size(); ++i) {
				NodeID w = adj_v[i].target;
				EdgeWeight w_d = adj_v[i].weight + v_d;
				// Adjacency is sorted descending, so once a lower-ranked
				// neighbour appears the rest are lower-ranked too.
				if (w < v) break; // Only the neighbors with lower ranks will be visited.
				if (!vis[w]) {
					if (distances[w] == INF_WEIGHT) {
						heap_size++;
						if (max_heap_size < heap_size)
							max_heap_size = heap_size;
					}
					if (distances[w] > w_d) {
						pqueue.update(w, w_d);
						distances[w] = w_d;
					}
				}
			}
		pruned:
			{}
		}
		hsize = hsize + max_heap_size;
		// Reset the Dijkstra scratch state touched by this root.
		while (!visited_que.empty()) {
			NodeID vis_v = visited_que.front();
			visited_que.pop();
			vis[vis_v] = false;
			distances[vis_v] = INF_WEIGHT;
			pqueue.clear(vis_v);
		}
		pqueue.clear_n();
		for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
			dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
		usd[r] = true;
	}
	cout << "total pop:" << pop << endl;
	cout << "average max heap size " << (double)hsize / (double)numOfVertices << endl;
	// Materialize the temporary labels into the final index.
	for (size_t v = 0; v < numOfVertices; ++v) {
		NodeID k = tmp_idx[v].first.size();
		index_[inv[v]].spt_v.resize(k);
		index_[inv[v]].spt_d.resize(k);
		for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
		for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
		tmp_idx[v].first.clear();
		tmp_idx[v].second.clear();
	}
}

// Directed variant of labeling(): builds forward labels (index_) from the
// out-adjacency and backward labels (bindex_) from the in-adjacency of the
// contracted graph, in rank order.
void labeling_directed(CHGraph &chgraph, Ordering &orders) {
	for (int i = 0; i < numOfVertices; ++i) {
		sort(chgraph.adj[i].begin(), chgraph.adj[i].end());
		sort(chgraph.r_adj[i].begin(), chgraph.r_adj[i].end());
	}
	vector<index_t>& index_ = dlabels.index_;
	vector<index_t>& bindex_ = dlabels.bindex_;
	vector<NodeID> &inv = orders.inv;
	vector<NodeID> &rank = orders.rank;
	vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));
	vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));
	vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
	vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT);
	set<NodeID> appendCandidate;
	// Main process:
	//  forward search: gather out-labels through forward (out) neighbours;
	//  backward search: gather in-labels through backward (in) neighbours.
	// Process every vertex from highest rank (0) to lowest.
	for (NodeID v = 0; v < numOfVertices; ++v) {
		// Forward search.
		{
			vector<CHGraph::CH_Edge>& adj_v = chgraph.adj[v];
			pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
			// Scan every out-neighbour u of v (only higher-ranked u < v).
			for (int i = 0; i < adj_v.size(); ++i) {
				NodeID u = adj_v[i].target;
				NodeID level = adj_v[i].level;
				if (u >= v) continue;
				EdgeWeight weight = adj_v[i].weight;
				// Propagate every forward label entry w of u.
				pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_u = tmp_idx[u];
				for (int j = 0; j < tmp_idx_u.first.size(); ++j) {
					NodeID w = tmp_idx_u.first[j];
					EdgeWeight vuw_distance = tmp_idx_u.second[j] + weight; // Distance from v to u to w.
					if (dst_r[w] > vuw_distance) {
						if (dst_r[w] == INF_WEIGHT)
							appendCandidate.insert(w);
						dst_r[w] = vuw_distance;
					}
				}
			}
			// Post-prune candidates with a query test against w's BACKWARD
			// labels, starting from the highest-ranked candidate.  O(|M|^2),
			// |M| = average label size.
			for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
				NodeID w = *it;
				pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_w = r_tmp_idx[w];
				for (int k = 0; k < r_tmp_idx_w.first.size(); ++k) {
					NodeID labelOfW = r_tmp_idx_w.first[k];
					EdgeWeight distanceToIt = r_tmp_idx_w.second[k] + dst_r[labelOfW];
					if (dst_r[w] >= distanceToIt) {
						// Pruned: already answered via higher-ranked labelOfW.
						if (labelOfW != w)
							dst_r[w] = INF_WEIGHT;
						break;
					}
				}
			}
			// Append the surviving candidates to v's forward label.
			for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
				NodeID w = *it;
				if (dst_r[w] == INF_WEIGHT) continue;
				tmp_idx_v.first.push_back(w);
				tmp_idx_v.second.push_back(dst_r[w]);
				dst_r[w] = INF_WEIGHT;
			}
			appendCandidate.clear();
			tmp_idx_v.first.push_back(v);
			tmp_idx_v.second.push_back(0);
		}
		// Backward search (mirror of the forward pass over r_adj/r_tmp_idx).
		{
			vector<CHGraph::CH_Edge>& r_adj_v = chgraph.r_adj[v];
			pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v];
			for (int i = 0; i < r_adj_v.size(); ++i) {
				NodeID u = r_adj_v[i].target;
				NodeID level = r_adj_v[i].level;
				if (u >= v) continue;
				EdgeWeight weight = r_adj_v[i].weight;
				pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_u = r_tmp_idx[u];
				for (int j = 0; j < r_tmp_idx_u.first.size(); ++j) {
					NodeID w = r_tmp_idx_u.first[j];
					EdgeWeight wuv_distance = r_tmp_idx_u.second[j] + weight; // Distance from w to u to v.
					if (r_dst_r[w] > wuv_distance) {
						if (r_dst_r[w] == INF_WEIGHT)
							appendCandidate.insert(w);
						r_dst_r[w] = wuv_distance;
					}
				}
			}
			// Post-prune against w's FORWARD labels.
			for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
				NodeID w = *it;
				pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_w = tmp_idx[w];
				for (int k = 0; k < tmp_idx_w.first.size(); ++k) {
					NodeID labelOfW = tmp_idx_w.first[k];
					EdgeWeight distanceToIt = tmp_idx_w.second[k] + r_dst_r[labelOfW];
					if (r_dst_r[w] >= distanceToIt) {
						if (labelOfW != w)
							r_dst_r[w] = INF_WEIGHT;
						break;
					}
				}
			}
			for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
				NodeID w = *it;
				if (r_dst_r[w] == INF_WEIGHT) continue;
				r_tmp_idx_v.first.push_back(w);
				r_tmp_idx_v.second.push_back(r_dst_r[w]);
				r_dst_r[w] = INF_WEIGHT;
			}
			appendCandidate.clear();
			r_tmp_idx_v.first.push_back(v);
			r_tmp_idx_v.second.push_back(0);
		}
	}
	// Materialize both label sets; `added` counts total label entries.
	int added = 0;
	for (size_t v = 0; v < numOfVertices; ++v ){
		tmp_idx[v].first.push_back(numOfVertices);
		tmp_idx[v].second.push_back(INF_WEIGHT);
		NodeID k = tmp_idx[v].first.size();
		index_[inv[v]].spt_v.resize(k);
		index_[inv[v]].spt_d.resize(k);
		for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
		for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
		tmp_idx[v].first.clear();
		tmp_idx[v].second.clear();
		added += k;
		r_tmp_idx[v].first.push_back(numOfVertices);
		r_tmp_idx[v].second.push_back(INF_WEIGHT);
		k = r_tmp_idx[v].first.size();
		bindex_[inv[v]].spt_v.resize(k);
		bindex_[inv[v]].spt_d.resize(k);
		for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i];
		for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i];
		r_tmp_idx[v].first.clear();
		r_tmp_idx[v].second.clear();
		added += k;
	}
	cout << added << endl;
}

// Undirected bottom-up construction: contract vertices from least to most
// important, add witness-search shortcuts, then build the labels.
Bottomup(CHGraph &chgraph, Ordering &orders, double& time_contracting, const double SWITCH_DEGREE_PARA, const int HOP_LIMIT_PARA) {
	iteration_generated.resize(numOfVertices);
	// Temporary structures for the Dijkstra-based witness searches.
	benchmark::heap<2, EdgeWeight, NodeID>
pqueue(numOfVertices); unordered_set<NodeID> visited_set; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); vector<bool> hitTarget(numOfVertices);// false vector<bool> vis(numOfVertices); contracted.resize(numOfVertices, false); possibleWitness.resize(numOfVertices, INF_WEIGHT); mtmBucket.resize(numOfVertices); // Temporary structures for Dijkstra searches. NodeID remainNode = numOfVertices; int currentEdges = relabelByOrder(chgraph, orders); int hopLimitsParameter = 1; double currentDegree = (double)currentEdges / (double)numOfVertices; time_contracting = GetCurrentTimeSec(); // A main loop to pick all vertices from the least to the most important ones. for (NodeID v = numOfVertices - 1; v > 0; --v) { if(v % (numOfVertices /10) == 0) cout << "Building shortcuts for " << v << "th vertices("<< orders.inv[v] <<"). Total shortcuts ratio so far " << currentEdges / numOfEdges << " time:" << GetCurrentTimeSec() - time_contracting << "s" << endl; contracted[v] = true; double time_thisround = GetCurrentTimeSec(); int addShortcuts = witness_search(v, chgraph, hopLimitsParameter, orders, vis, pqueue, visited_set, distances, hitTarget); iteration_generated[v] = GetCurrentTimeSec() - time_thisround; currentEdges += addShortcuts; currentDegree = (double)currentEdges / (double)v; if (HOP_LIMIT_PARA == 2 || HOP_LIMIT_PARA == 1 || HOP_LIMIT_PARA == 5) { hopLimitsParameter = HOP_LIMIT_PARA; continue; } if (currentDegree > 3.3 && currentDegree <= SWITCH_DEGREE_PARA && hopLimitsParameter != 2) { hopLimitsParameter = 2; cout << "At Vertex " << v << ". Switch to 2-hop limit with average degree:" << currentDegree << " #edge:" << currentEdges << endl; }else if (currentDegree <= 3.3 && hopLimitsParameter !=1) { hopLimitsParameter = 1; cout << "At Vertex " << v << ". 
Switch to 1-hop limit with average degree:" << currentDegree << " #edge:" << currentEdges << endl; }else if(hopLimitsParameter != 5 && currentDegree > SWITCH_DEGREE_PARA){ hopLimitsParameter = 5; cout << "At Vertex " << v << ". Switch to multi-hop limit with average degree:" << currentDegree << " #edge:" << currentEdges << endl; } } time_contracting = GetCurrentTimeSec() - time_contracting; cout << "Spent " << time_contracting << " s on creating shortcuts..." << endl; cout << "Start to label vertices..." << endl; labeling(chgraph, orders); } Bottomup(CHGraph &chgraph, Ordering &orders, bool directed_flag, double& time_contracting, const double SWITCH_DEGREE_PARA, const int HOP_LIMIT_PARA) { benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); unordered_set<NodeID> visited_set; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); vector<bool> hitTarget(numOfVertices);// false vector<bool> vis(numOfVertices); iteration_generated.resize(numOfVertices); contracted.resize(numOfVertices, false); possibleWitness.resize(numOfVertices, INF_WEIGHT); mtmBucket.resize(numOfVertices); // Temporary structures for Dijkstra searches. NodeID remainNode = numOfVertices; int currentEdges = relabelByOrder(chgraph, orders); int hopLimitsParameter = 1; double currentDegree = (double)currentEdges / (double)numOfVertices; time_contracting = GetCurrentTimeSec(); // A main loop to pick all vertices from the least to the most important ones. for (NodeID v = numOfVertices - 1; v > 0; --v) { if (v % (numOfVertices / 10) == 0) cout << "Building shortcuts for " << v << "th vertices(" << orders.inv[v] << "). 
Total shortcuts ratio so far " << currentEdges / numOfEdges << " time:" << GetCurrentTimeSec()- time_contracting << "s" << endl;
            // Contract vertex v: a witness search decides which shortcuts must be
            // added so shortest distances survive v's removal.
            contracted[v] = true;
            double time_thisround = GetCurrentTimeSec();
            int addShortcuts = witness_search_directed(v, chgraph, hopLimitsParameter, orders, vis, pqueue, visited_set, distances, hitTarget);
            iteration_generated[v] = GetCurrentTimeSec() - time_thisround;
            currentEdges += addShortcuts;
            currentDegree = (double)currentEdges / (double)v;
            // A HOP_LIMIT_PARA of 1, 2 or 5 pins the witness-search hop limit;
            // any other value lets it adapt to the running average degree below.
            if (HOP_LIMIT_PARA == 2 || HOP_LIMIT_PARA == 1 || HOP_LIMIT_PARA == 5) {
                hopLimitsParameter = HOP_LIMIT_PARA;
                continue;
            }
            if (currentDegree > 3.3 && currentDegree <= SWITCH_DEGREE_PARA && hopLimitsParameter != 2) {
                hopLimitsParameter = 2;
                cout << "At Vertex " << v << ". Switch to 2-hop limit with average degree:" << currentDegree << " #edge:" << currentEdges << endl;
            } else if (currentDegree <= 3.3 && hopLimitsParameter != 1) {
                hopLimitsParameter = 1;
                cout << "At Vertex " << v << ". Switch to 1-hop limit with average degree:" << currentDegree << " #edge:" << currentEdges << endl;
            } else if (hopLimitsParameter != 5 && currentDegree > SWITCH_DEGREE_PARA) {
                hopLimitsParameter = 5;
                cout << "At Vertex " << v << ". Switch to multi-hop limit with average degree:" << currentDegree << " #edge:" << currentEdges << endl;
            }
        }
        time_contracting = GetCurrentTimeSec() - time_contracting;
        cout << "Spent " << time_contracting << " s on creating shortcuts..." << endl;
        cout << "Start to label vertices..." << endl;
        labeling_directed(chgraph, orders);
    }

    // Bottom-up construction (no incremental hop-limit switching): search for
    // all shortcuts in parallel with per-thread scratch structures, merge the
    // per-thread results, splice them into the graph, then build the labels.
    // time_contracting is an out-parameter receiving the total shortcut time.
    Bottomup(CHGraph &chgraph, Ordering &orders, double& time_contracting) {
        vector<vector<CHGraph::CH_Edge> > shortcuts(numOfVertices);
        vector<vector<CHGraph::CH_Edge> > r_shortcuts(numOfVertices);
        //benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
        //unordered_set<NodeID> visited_set;
        //vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
        //vector<bool> hitTarget(numOfVertices);// false
        //vector<bool> vis(numOfVertices);
        //iteration_generated.resize(numOfVertices);
        //vector<NodeID> max_parents(numOfVertices, numOfVertices);
        //vector<bool> isNeighbor(numOfVertices, false);
        //vector<bool> waitForPop(numOfVertices, false);
        //
        //vector<NodeID> que(numOfVertices);
        relabelByOrder(chgraph, orders);
        long total_shortcut = 0;
        time_contracting = GetCurrentTimeSec();
        double time_shortcuting = GetCurrentTimeSec();
        // One private copy of every search structure per OpenMP thread, so the
        // parallel loop below needs no locking.
        int num_threads = 10;
        omp_set_num_threads(num_threads);
        vector<vector<vector<CHGraph::CH_Edge> > > shortcuts_omp(num_threads, vector<vector<CHGraph::CH_Edge> >(numOfVertices));
        vector<vector<vector<CHGraph::CH_Edge> > > r_shortcuts_omp(num_threads, vector<vector<CHGraph::CH_Edge> >(numOfVertices));
        vector<benchmark::heap<2, EdgeWeight, NodeID> > pqueue(num_threads, benchmark::heap<2, EdgeWeight, NodeID>(numOfVertices) );
        vector<unordered_set<NodeID> > visited_set(num_threads);
        vector<vector<EdgeWeight> > distances(num_threads, vector<EdgeWeight>(numOfVertices, INF_WEIGHT));
        vector<vector<bool> > hitTarget(num_threads, vector<bool>(numOfVertices) );// false
        vector<vector<bool> > vis(num_threads, vector<bool>(numOfVertices));
        iteration_generated.resize(numOfVertices);
        vector<vector<NodeID> > max_parents(num_threads, vector<NodeID>(numOfVertices, numOfVertices));
        vector<vector<bool> > isNeighbor(num_threads, vector<bool>(numOfVertices, false));
        vector<vector<bool> > waitForPop(num_threads, vector<bool>(numOfVertices, false));
        vector<vector<NodeID> > que(num_threads, vector<NodeID>(numOfVertices));
        // Dijkstra-based shortcut search for weighted graphs, BFS for unweighted.
        #pragma omp parallel for schedule(dynamic)
        for (NodeID v = numOfVertices - 1; v > 0; --v) {
            if (WEIGHTED_FLAG == true) {
                //build_shortcuts_dij(v, chgraph.adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, shortcuts);
                build_shortcuts_dij(v, chgraph.adj, vis[omp_get_thread_num()], pqueue[omp_get_thread_num()], visited_set[omp_get_thread_num()], distances[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], waitForPop[omp_get_thread_num()], shortcuts_omp[omp_get_thread_num()]);
                if (DIRECTED_FLAG == true)
                    // build_shortcuts_dij(v, chgraph.r_adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, r_shortcuts);
                    build_shortcuts_dij(v, chgraph.r_adj, vis[omp_get_thread_num()], pqueue[omp_get_thread_num()], visited_set[omp_get_thread_num()], distances[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], waitForPop[omp_get_thread_num()], r_shortcuts_omp[omp_get_thread_num()]);
            } else {
                build_shortcuts_bfs(v, chgraph.adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]);
                // build_shortcuts_bfs(v, chgraph.adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]);
                if (DIRECTED_FLAG == true)
                    build_shortcuts_bfs(v, chgraph.r_adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], r_shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]);
                //build_shortcuts_bfs(v, chgraph.r_adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], r_shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]);
            }
        }
        // Merge the per-thread shortcut lists into the shared per-vertex lists.
        for (int t = 0; t < num_threads; ++t) {
            for (int v = 0; v < numOfVertices; ++v) {
                for (int i = 0; i < shortcuts_omp[t][v].size(); ++i) {
                    shortcuts[v].push_back(shortcuts_omp[t][v][i]);
                }
                for (int i = 0; i < r_shortcuts_omp[t][v].size(); ++i) {
                    r_shortcuts[v].push_back(r_shortcuts_omp[t][v][i]);
                }
            }
        }
        //for (NodeID v = numOfVertices - 1; v > 0; --v) {
        //	if (WEIGHTED_FLAG == true) {
        //		//build_shortcuts_dij(v, chgraph.adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, shortcuts);
        //		build_shortcuts_dij(v, chgraph.adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, waitForPop, shortcuts);
        //		if (DIRECTED_FLAG == true)
        //			// build_shortcuts_dij(v, chgraph.r_adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, r_shortcuts);
        //			build_shortcuts_dij(v, chgraph.r_adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, waitForPop, r_shortcuts);
        //	} else {
        //		build_shortcuts_bfs(v, chgraph.adj, vis, max_parents, isNeighbor, shortcuts, que);
        //		if (DIRECTED_FLAG == true)
        //			build_shortcuts_bfs(v, chgraph.r_adj, vis, max_parents, isNeighbor, r_shortcuts, que);
        //	}
        //}
        double time_searching = GetCurrentTimeSec() - time_contracting;
        cout << "Spent " << time_searching << " s on searching shortcuts..." << endl;
        // Splice the collected shortcuts into the adjacency lists.
        if (DIRECTED_FLAG == false)
            total_shortcut += process_shortcuts(chgraph, shortcuts);
        else
            total_shortcut += process_shortcuts_directed(chgraph, shortcuts, r_shortcuts);
        time_contracting = GetCurrentTimeSec() - time_contracting - time_searching;
        cout << "Spent " << time_contracting << " s on processing shortcuts..." << endl;
        cout << "Start to label vertices..." << endl;
        cout << total_shortcut << " shortcuts in total." << endl;
        time_shortcuting = GetCurrentTimeSec() - time_shortcuting;
        time_contracting = time_shortcuting;
        if (DIRECTED_FLAG == false)
            td_labeling(chgraph, orders); //labeling(chgraph, orders);
        else
            labeling_directed(chgraph, orders);
    }

    // CH-order variant: the vertex order is already fixed by the caller, so no
    // shortcut search is needed here — just relabel and build the labels.
    Bottomup(CHGraph &chgraph, Ordering &orders, double& time_contracting, bool CH_ORDER_FLAGS) {
        relabelByOrder(chgraph, orders);
        labeling(chgraph, orders);
    }

    // Undirected case: insert candidate shortcuts into chgraph.adj, skipping a
    // shortcut (or just lowering the stored weight) when an equal-endpoint edge
    // already exists. Returns the number of edges actually added.
    long process_shortcuts(CHGraph& chgraph, vector<vector<CHGraph::CH_Edge> > shortcuts) {
        vector<vector<bool> > add_out(numOfVertices);
        vector<vector<bool> > add_in(numOfVertices);
        long added_shortcuts = 0;
        // Test whether shortcut can replace the original edges.
        for (NodeID v = 0; v < numOfVertices; ++v) {
            vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v];
            vector<bool>& add_out_v = add_out[v];
            vector<bool>& add_in_v = add_in[v];
            add_out_v.resize(shortcuts_v.size(), true);
            add_in_v.resize(shortcuts_v.size(), true);
            for (int i = 0; i < shortcuts_v.size(); ++i) {
                NodeID w = shortcuts_v[i].target;
                // Existing edge v->w? Then only tighten its weight.
                vector<CHGraph::CH_Edge>::iterator pos = lower_bound(chgraph.adj[v].begin(), chgraph.adj[v].end(), shortcuts_v[i]);
                if (pos != chgraph.adj[v].end() && (*pos).target == shortcuts_v[i].target) {
                    if ((*pos).weight > shortcuts_v[i].weight)
                        (*pos).weight = shortcuts_v[i].weight;
                    add_out_v[i] = false;
                }
                // Existing mirror edge w->v? Same treatment.
                CHGraph::CH_Edge dummy(v, 0, 0);
                pos = lower_bound(chgraph.adj[w].begin(), chgraph.adj[w].end(), dummy);
                if (pos != chgraph.adj[w].end() && (*pos).target == v) {
                    if ((*pos).weight > shortcuts_v[i].weight)
                        (*pos).weight = shortcuts_v[i].weight;
                    add_in_v[i] = false;
                }
            }
        }
        // Append the actual shortcuts.
        for (NodeID v = 0; v < numOfVertices; ++v) {
            vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v];
            vector<bool>& add_out_v = add_out[v];
            vector<bool>& add_in_v = add_in[v];
            for (int i = 0; i < shortcuts_v.size(); ++i) {
                NodeID w = shortcuts_v[i].target;
                NodeID level = shortcuts_v[i].level;
                EdgeWeight weight = shortcuts_v[i].weight;
                if (add_out_v[i] == true)
                    chgraph.adj[v].push_back(CHGraph::CH_Edge(w, level, weight));
                if (add_in_v[i] == true)
                    chgraph.adj[w].push_back(CHGraph::CH_Edge(v, level, weight));
                // Count one per shortcut that added at least one direction.
                if (add_out_v[i]==false && add_in_v[i]==false)
                    added_shortcuts--;
                added_shortcuts++;
            }
        }
        return added_shortcuts;
    }

    // Directed case: forward shortcuts (v->w) go into adj[v] and r_adj[w];
    // backward shortcuts (w->v) go into adj[w] and r_adj[v]. Existing edges
    // are weight-tightened instead of duplicated. Returns edges added.
    long process_shortcuts_directed(CHGraph& chgraph, vector<vector<CHGraph::CH_Edge> > shortcuts, vector<vector<CHGraph::CH_Edge> > r_shortcuts) {
        vector<vector<bool> > add_out(numOfVertices);
        vector<vector<bool> > add_in(numOfVertices);
        vector<vector<bool> > r_add_out(numOfVertices);
        vector<vector<bool> > r_add_in(numOfVertices);
        long added_shortcuts = 0;
        // Test whether shortcut can replace the original edges.
        for (NodeID v = 0; v < numOfVertices; ++v) {
            {// Forward shortcut from (v->w), so add this shortcut into adj[v] and r_adj[w].
                vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v];
                vector<bool>& add_out_v = add_out[v];
                vector<bool>& add_in_v = add_in[v];
                add_out_v.resize(shortcuts_v.size(), true);
                add_in_v.resize(shortcuts_v.size(), true);
                for (int i = 0; i < shortcuts_v.size(); ++i) {
                    NodeID w = shortcuts_v[i].target;
                    vector<CHGraph::CH_Edge>::iterator pos = lower_bound(chgraph.adj[v].begin(), chgraph.adj[v].end(), shortcuts_v[i]);
                    if (pos != chgraph.adj[v].end() && (*pos).target == shortcuts_v[i].target) {
                        if ((*pos).weight > shortcuts_v[i].weight)
                            (*pos).weight = shortcuts_v[i].weight;
                        add_out_v[i] = false;
                    }
                    CHGraph::CH_Edge dummy(v, 0, 0);
                    pos = lower_bound(chgraph.r_adj[w].begin(), chgraph.r_adj[w].end(), dummy);
                    if (pos != chgraph.r_adj[w].end() && (*pos).target == v) {
                        if ((*pos).weight > shortcuts_v[i].weight)
                            (*pos).weight = shortcuts_v[i].weight;
                        add_in_v[i] = false;
                    }
                }
            }
            {// Backward shortcuts, add (w->v) into adj[w] and r_adj[v].
                vector<CHGraph::CH_Edge>& r_shortcuts_v = r_shortcuts[v];
                vector<bool>& r_add_out_v = r_add_out[v];
                vector<bool>& r_add_in_v = r_add_in[v];
                r_add_out_v.resize(r_shortcuts_v.size(), true);
                r_add_in_v.resize(r_shortcuts_v.size(), true);
                for (int i = 0; i < r_shortcuts_v.size(); ++i) {
                    NodeID w = r_shortcuts_v[i].target;
                    vector<CHGraph::CH_Edge>::iterator pos = lower_bound(chgraph.r_adj[v].begin(), chgraph.r_adj[v].end(), r_shortcuts_v[i]);
                    if (pos != chgraph.r_adj[v].end() && (*pos).target == r_shortcuts_v[i].target) {
                        if ((*pos).weight > r_shortcuts_v[i].weight)
                            (*pos).weight = r_shortcuts_v[i].weight;
                        r_add_in_v[i] = false;
                    }
                    CHGraph::CH_Edge dummy(v, 0, 0);
                    pos = lower_bound(chgraph.adj[w].begin(), chgraph.adj[w].end(), dummy);
                    if (pos != chgraph.adj[w].end() && (*pos).target == v) {
                        if ((*pos).weight > r_shortcuts_v[i].weight)
                            (*pos).weight = r_shortcuts_v[i].weight;
                        r_add_out_v[i] = false;
                    }
                }
            }
        }
        // Append the actual shortcuts.
        for (NodeID v = 0; v < numOfVertices; ++v) {
            {//Forward shortcuts, add (v->w) to adj[v] and r_adj[w].
                vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v];
                vector<bool>& add_out_v = add_out[v];
                vector<bool>& add_in_v = add_in[v];
                for (int i = 0; i < shortcuts_v.size(); ++i) {
                    NodeID w = shortcuts_v[i].target;
                    NodeID level = shortcuts_v[i].level;
                    EdgeWeight weight = shortcuts_v[i].weight;
                    if (add_out_v[i] == true) {
                        chgraph.adj[v].push_back(CHGraph::CH_Edge(w, level, weight));
                        // cout << v << "," << w << "," << weight << endl;
                    }
                    if (add_in_v[i] == true) {
                        chgraph.r_adj[w].push_back(CHGraph::CH_Edge(v, level, weight));
                        // cout << w << "," << v << "," << weight << endl;
                    }
                    if (add_out_v[i] == false && add_in_v[i] == false)
                        added_shortcuts--;
                    added_shortcuts++;
                }
            }
            {// Backward shortcuts, add (w->v) to adj[w] and r_adj[v].
                vector<CHGraph::CH_Edge>& r_shortcuts_v = r_shortcuts[v];
                vector<bool>& r_add_out_v = r_add_out[v];
                vector<bool>& r_add_in_v = r_add_in[v];
                for (int i = 0; i < r_shortcuts_v.size(); ++i) {
                    NodeID w = r_shortcuts_v[i].target;
                    NodeID level = r_shortcuts_v[i].level;
                    EdgeWeight weight = r_shortcuts_v[i].weight;
                    if (r_add_out_v[i] == true) {
                        chgraph.adj[w].push_back(CHGraph::CH_Edge(v, level, weight));
                        // cout << w << "," << v << "," << weight << endl;
                    }
                    if (r_add_in_v[i] == true) {
                        chgraph.r_adj[v].push_back(CHGraph::CH_Edge(w, level, weight));
                        // cout << v << "," << w << "," << weight << endl;
                    }
                    if (r_add_out_v[i] == false && r_add_in_v[i] == false)
                        added_shortcuts--;
                    added_shortcuts++;
                }
            }
        }
        return added_shortcuts;
    }
};
#endif
FunctionalDecomposition.c
#include<stdlib.h> #include<stdio.h> #include<math.h> #include<omp.h> int NowYear; // 2017 - 2022 int NowMonth; // 0 - 11 float NowPrecip; // inches of rain per month float NowTemp; // temperature this month float NowHeight; // grain height in inches int NowNumDeer; // number of deer in the current population const float GRAIN_GROWS_PER_MONTH = 8.0; const float ONE_DEER_EATS_PER_MONTH = 0.5; const float AVG_PRECIP_PER_MONTH = 6.0; // average const float AMP_PRECIP_PER_MONTH = 6.0; // plus or minus const float RANDOM_PRECIP = 2.0; // plus or minus noise const float AVG_TEMP = 50.0; // average const float AMP_TEMP = 20.0; // plus or minus const float RANDOM_TEMP = 10.0; // plus or minus noise const float MIDTEMP = 40.0; const float MIDPRECIP = 10.0; float Ranf( unsigned int *seedp, float low, float high ) { float r = (float) rand_r( seedp ); // 0 - RAND_MAX return( low + r * ( high - low ) / (float)RAND_MAX ); } //Calculate square factor float SQR( float x ) { return x*x; } //Calculate the new Deer growth based on grain growth void GrainDeer() { float tmp=NowHeight; tmp -= (float)NowNumDeer * ONE_DEER_EATS_PER_MONTH; #pragma omp barrier if(tmp<1) { NowNumDeer--; if(NowNumDeer <0) { NowNumDeer=0; } } else { NowNumDeer++; } #pragma omp barrier NowHeight=NowHeight-tmp; #pragma omp barrier } //calculate the new grain growth based on precipitation void Grain() { float tempFactor = exp(-SQR((NowTemp - MIDTEMP)/10.)); float precipFactor = exp(-SQR(( NowPrecip - MIDPRECIP)/10.)); float tmp=NowHeight; tmp +=tempFactor * precipFactor * GRAIN_GROWS_PER_MONTH; #pragma omp barrier NowHeight=tmp; #pragma omp barrier #pragma omp barrier } void Watcher() { #pragma omp barrier #pragma omp barrier float ang = ( 30.*(float)NowMonth + 15. ) * ( M_PI / 180. 
); static unsigned int seed = 0; // a thread-private variable float temp = AVG_TEMP - AMP_TEMP * cos( ang ); NowTemp = temp + Ranf( &seed, -RANDOM_TEMP, RANDOM_TEMP ); float precip = AVG_PRECIP_PER_MONTH + AMP_PRECIP_PER_MONTH * sin( ang ); NowPrecip = precip + Ranf( &seed, -RANDOM_PRECIP, RANDOM_PRECIP ); if( NowPrecip < 0. ) NowPrecip = 0.; printf("Grain Growth= %f,DeerGrowth= %d, Temp= %f, Precipitation= %f\n",NowHeight,NowNumDeer,NowTemp, NowPrecip); #pragma omp barrier } int main() { NowYear=2017; NowMonth=0; NowNumDeer=1; NowHeight=1; float ang = ( 30.*(float)NowMonth + 15. ) * ( M_PI / 180. ); float temp = AVG_TEMP - AMP_TEMP * cos( ang ); static unsigned int seed = 0; NowTemp = temp + Ranf( &seed, -RANDOM_TEMP, RANDOM_TEMP ); float precip = AVG_PRECIP_PER_MONTH + AMP_PRECIP_PER_MONTH * sin( ang ); NowPrecip = precip + Ranf( &seed, -RANDOM_PRECIP, RANDOM_PRECIP ); if( NowPrecip < 0. ) NowPrecip = 0.; omp_set_num_threads(3); // same as # of sections while(NowYear <=2022) { while(NowMonth <=11) { #pragma omp parallel sections { #pragma omp section { GrainDeer(); } #pragma omp section { Grain(); } #pragma omp section { Watcher(); } //#pragma omp section // { // MyAgent(); // your own // } } NowMonth++; //exit(0); } NowMonth=0; NowYear++; } }
GB_binop__div_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__div_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__div_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__div_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__div_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__div_int16)
// A*D function (colscale):         GB (_AxD__div_int16)
// D*A function (rowscale):         GB (_DxB__div_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__div_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__div_int16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__div_int16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__div_int16)
// C=scalar+B                       GB (_bind1st__div_int16)
// C=scalar+B'                      GB (_bind1st_tran__div_int16)
// C=A+scalar                       GB (_bind2nd__div_int16)
// C=A'+scalar                      GB (_bind2nd_tran__div_int16)

// C type:     int16_t
// A type:     int16_t
// A pattern?  0
// B type:     int16_t
// B pattern?  0

// BinaryOp:   cij = GB_IDIV_SIGNED (aij, bij, 16)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (GB_IDIV_SIGNED: signed integer division with the
// GraphBLAS conventions for divide-by-zero, 16-bit)
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_SIGNED (x, y, 16) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_INT16 || GxB_NO_DIV_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // all numerical work is in the included template, driven by the macros
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__div_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__div_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion passes explicit alpha/beta scalars for entries present in
    // only one of A or B
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__div_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__div_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t  x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (x, bij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__div_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t  y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (aij, y, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_SIGNED (x, aij, 16) ;     \
}

GrB_Info GB (_bind1st_tran__div_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_IDIV_SIGNED (aij, y, 16) ;     \
}

GrB_Info GB (_bind2nd_tran__div_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB109-orderedmissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> /* This is a program based on a test contributed by Yizi Gu@Rice Univ. * Missing the ordered clause * Data race pair: x@56:5 vs. x@56:5 * */ int main() { int x =0; #pragma omp parallel for reduction(+:x) for (int i = 0; i < 100; ++i) { x++; } printf ("x=%d\n",x); return 0; }
GB_binop__plus_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__plus_int16
// A.*B function (eWiseMult):       GB_AemultB__plus_int16
// A*D function (colscale):         GB_AxD__plus_int16
// D*A function (rowscale):         GB_DxB__plus_int16
// C+=B function (dense accum):     GB_Cdense_accumB__plus_int16
// C+=b function (dense accum):     GB_Cdense_accumb__plus_int16
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__plus_int16
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__plus_int16
// C=scalar+B                       GB_bind1st__plus_int16
// C=scalar+B'                      GB_bind1st_tran__plus_int16
// C=A+scalar                       GB_bind2nd__plus_int16
// C=A'+scalar                      GB_bind2nd_tran__plus_int16

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij + bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x + y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_INT16 || GxB_NO_PLUS_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Generated kernel family for the PLUS binary operator on int16.  Each body
// is an #include'd template specialized by the macros defined above; GB_DISABLE
// routes callers to the generic case when this operator is compiled out.
void GB_Cdense_ewise3_accum__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__plus_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the code generator;
    // kept verbatim -- it is harmless dead code.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = x + Bx [p] for all p in 0..anz-1 (hard-coded loop, OpenMP parallel).
GrB_Info GB_bind1st__plus_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t bij = Bx [p] ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = Ax [p] + y for all p in 0..anz-1 (hard-coded loop, OpenMP parallel).
GrB_Info GB_bind2nd__plus_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (x + aij) ;           \
}

GrB_Info GB_bind1st_tran__plus_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (aij + y) ;           \
}

GrB_Info GB_bind2nd_tran__plus_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lulesh.c
/* This is a Version 2.0 MPI + Open{ACC,MP} Beta implementation of LULESH Copyright (c) 2010-2013. Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-461231 All rights reserved. This file is part of LULESH, Version 2.0. Please also read this link -- http://www.opensource.org/licenses/index.php ////////////// DIFFERENCES BETWEEN THIS VERSION (2.x) AND EARLIER VERSIONS: * Addition of regions to make work more representative of multi-material codes * Default size of each domain is 30^3 (27000 elem) instead of 45^3. This is more representative of our actual working set sizes * Single source distribution supports pure serial, pure OpenMP, MPI-only, and MPI+OpenMP * Addition of ability to visualize the mesh using VisIt https://wci.llnl.gov/codes/visit/download.html * Various command line options (see ./lulesh2.0 -h) -q : quiet mode - suppress stdout -i <iterations> : number of cycles to run -s <size> : length of cube mesh along side -r <numregions> : Number of distinct regions (def: 11) -b <balance> : Load balance between regions of a domain (def: 1) -c <cost> : Extra cost of more expensive regions (def: 1) -f <filepieces> : Number of file parts for viz output (def: np/9) -p : Print out progress -v : Output viz file (requires compiling with -DVIZ_MESH -h : This message printf("Usage: %s [opts]\n", execname); printf(" where [opts] is one or more of:\n"); printf(" -q : quiet mode - suppress all stdout\n"); printf(" -i <iterations> : number of cycles to run\n"); printf(" -s <size> : length of cube mesh along side\n"); printf(" -r <numregions> : Number of distinct regions (def: 11)\n"); printf(" -b <balance> : Load balance between regions of a domain (def: 1)\n"); printf(" -c <cost> : Extra cost of more expensive regions (def: 1)\n"); printf(" -f <numfiles> : Number of files to split viz dump into (def: (np+10)/9)\n"); printf(" -p : Print out progress\n"); printf(" -v : Output viz file (requires compiling with 
-DVIZ_MESH\n"); printf(" -h : This message\n"); printf("\n\n"); *Notable changes in LULESH 2.0 * Split functionality into different files lulesh.cc - where most (all?) of the timed functionality lies lulesh-comm.cc - MPI functionality lulesh-init.cc - Setup code lulesh-util.cc - Non-timed functions * * The concept of "regions" was added, although every region is the same ideal gas material, and the same sedov blast wave problem is still the only problem its hardcoded to solve. Regions allow two things important to making this proxy app more representative: * Four of the LULESH routines are now performed on a region-by-region basis, making the memory access patterns non-unit stride * Artificial load imbalances can be easily introduced that could impact parallelization strategies. * The load balance flag changes region assignment. Region number is raised to the power entered for assignment probability. Most likely regions changes with MPI process id. * The cost flag raises the cost of ~45% of the regions to evaluate EOS by the entered multiple. The cost of 5% is 10x the entered multiple. * MPI and OpenMP were added, and coalesced into a single version of the source that can support serial builds, MPI-only, OpenMP-only, and MPI+OpenMP * Added support to write plot files using "poor mans parallel I/O" when linked with the silo library, which in turn can be read by VisIt. * Enabled variable timestep calculation by default (courant condition), which results in an additional reduction. 
* Default domain (mesh) size reduced from 45^3 to 30^3 * Command line options to allow for numerous test cases without needing to recompile * Performance optimizations and code cleanup uncovered during study of LULESH 1.0 * Added a "Figure of Merit" calculation (elements solved per microsecond) and output in support of using LULESH 2.0 for the 2017 CORAL procurement * * Possible Differences in Final Release (other changes possible) * * High Level mesh structure to allow data structure transformations * Different default parameters * Minor code performance changes and cleanup TODO in future versions * Add reader for (truly) unstructured meshes, probably serial only * CMake based build system ////////////// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Additional BSD Notice 1. This notice is required to be provided under our contract with the U.S. Department of Energy (DOE). This work was produced at Lawrence Livermore National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE. 2. Neither the United States Government nor Lawrence Livermore National Security, LLC nor any of their employees, makes any warranty, express or implied, or assumes any liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately-owned rights. 3. Also, reference herein to any specific commercial products, process, or services by trade name, trademark, manufacturer or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes. 
*/

//#include <math.h>
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
//#include <ctype.h>
#include <time.h>
#if !defined(_OPENACC) && defined(_OPENMP)
# include <omp.h>
#endif

#include "lulesh.h"

#if !defined(LULESH_DUMP_OUTPUT)
#define LULESH_DUMP_OUTPUT 0
#endif

/*********************************/
/* Data structure implementation */
/*********************************/

/* These structs are used to turn local, constant-sized arrays into
 * scalars inside of accelerated regions. */
/* 8 scalar values, one per hexahedral element node. */
typedef struct {
   Real_t v0; Real_t v1; Real_t v2; Real_t v3;
   Real_t v4; Real_t v5; Real_t v6; Real_t v7;
} val8;

/* 6 scalar values. */
typedef struct {
   Real_t v0; Real_t v1; Real_t v2;
   Real_t v3; Real_t v4; Real_t v5;
} val6;

typedef struct { // 3x8 matrix for loop unrolling
   Real_t v0_0; Real_t v0_1; Real_t v0_2; Real_t v0_3;
   Real_t v0_4; Real_t v0_5; Real_t v0_6; Real_t v0_7;
   Real_t v1_0; Real_t v1_1; Real_t v1_2; Real_t v1_3;
   Real_t v1_4; Real_t v1_5; Real_t v1_6; Real_t v1_7;
   Real_t v2_0; Real_t v2_1; Real_t v2_2; Real_t v2_3;
   Real_t v2_4; Real_t v2_5; Real_t v2_6; Real_t v2_7;
} bmat;

typedef struct { // 8x4 matrix for loop unrolling
   Real_t v0_0; Real_t v0_1;Real_t v0_2;Real_t v0_3;
   Real_t v1_0; Real_t v1_1;Real_t v1_2;Real_t v1_3;
   Real_t v2_0; Real_t v2_1;Real_t v2_2;Real_t v2_3;
   Real_t v3_0; Real_t v3_1;Real_t v3_2;Real_t v3_3;
   Real_t v4_0; Real_t v4_1;Real_t v4_2;Real_t v4_3;
   Real_t v5_0; Real_t v5_1;Real_t v5_2;Real_t v5_3;
   Real_t v6_0; Real_t v6_1;Real_t v6_2;Real_t v6_3;
   Real_t v7_0; Real_t v7_1;Real_t v7_2;Real_t v7_3;
} hourmat;

#if USE_MPI
// Communication Work space
Real_t *commDataSend ;
Real_t *commDataRecv ;

// Maximum number of block neighbors
MPI_Request recvRequest[26] ; // 6 faces + 12 edges + 8 corners
MPI_Request sendRequest[26] ; // 6 faces + 12 edges + 8 corners
#endif

int m_numDevs;

/* Node-centered */
Real_t* m_x ;  /* coordinates */
Real_t* m_y ;
Real_t* m_z ;

Real_t* m_xd ; /* velocities */
Real_t* m_yd ;
Real_t* m_zd ;

Real_t* m_xdd ; /* accelerations */
Real_t* m_ydd ;
Real_t* m_zdd ;

Real_t* m_fx ;  /* forces */
Real_t* m_fy ;
Real_t* m_fz ;

/* tmp arrays that are allocated globally for OpenACC */
Real_t* m_fx_elem ;
Real_t* m_fy_elem ;
Real_t* m_fz_elem ;
Real_t* m_dvdx ;
Real_t* m_dvdy ;
Real_t* m_dvdz ;
Real_t* m_x8n ;
Real_t* m_y8n ;
Real_t* m_z8n ;
Real_t* m_sigxx ;
Real_t* m_sigyy ;
Real_t* m_sigzz ;
Real_t* m_determ ;
Real_t* m_e_old ;
Real_t* m_delvc ;
Real_t* m_p_old ;
Real_t* m_q_old ;
Real_t* m_compression ;
Real_t* m_compHalfStep ;
Real_t* m_qq_old ;
Real_t* m_ql_old ;
Real_t* m_work ;
Real_t* m_p_new ;
Real_t* m_e_new ;
Real_t* m_q_new ;
Real_t* m_bvc ;
Real_t* m_pbvc ;

Real_t* m_nodalMass ;  /* mass */

Index_t* m_symmX;  /* symmetry plane nodesets */
Index_t* m_symmY;
Index_t* m_symmZ;
bool m_symmXempty;
bool m_symmYempty;
bool m_symmZempty;

// Element-centered

// Region information
Int_t m_numReg ;
Int_t m_cost; //imbalance cost
Int_t *m_regElemSize ;      // Size of region sets
Index_t *m_regNumList ;     // Region number per domain element
Index_t **m_regElemlist ;   // region indexset

Index_t* m_matElemlist ;  /* material indexset */
Index_t* m_nodelist ;     /* elemToNode connectivity */

Index_t* m_lxim ;  /* element connectivity across each face */
Index_t* m_lxip ;
Index_t* m_letam ;
Index_t* m_letap ;
Index_t* m_lzetam ;
Index_t* m_lzetap ;

Int_t* m_elemBC ;  /* symmetry/free-surface flags for each elem face */

Real_t* m_dxx ;  /* principal strains -- temporary */
Real_t* m_dyy ;
Real_t* m_dzz ;

Real_t* m_delv_xi ;    /* velocity gradient -- temporary */
Real_t* m_delv_eta ;
Real_t* m_delv_zeta ;

Real_t* m_delx_xi ;    /* coordinate gradient -- temporary */
Real_t* m_delx_eta ;
Real_t* m_delx_zeta ;

Real_t* m_e ;   /* energy */

Real_t* m_p ;   /* pressure */
Real_t* m_q ;   /* q */
Real_t* m_ql ;  /* linear term for q */
Real_t* m_qq ;  /* quadratic term for q */

Real_t* m_v ;     /* relative volume */
Real_t* m_volo ;  /* reference volume */
Real_t* m_vnew ;  /* new relative volume -- temporary */
Real_t* m_delv ;  /* m_vnew - m_v */
Real_t* m_vdov ;  /* volume derivative over volume */

Real_t* m_arealg ;  /* characteristic length of an element */

Real_t* m_ss ;      /* "sound speed" */

Real_t* m_elemMass ;  /* mass */

// Cutoffs (treat as constants)
Real_t m_e_cut ;  // energy tolerance
Real_t m_p_cut ;  // pressure tolerance
Real_t m_q_cut ;  // q tolerance
Real_t m_v_cut ;  // relative volume tolerance
Real_t m_u_cut ;  // velocity tolerance

// Other constants (usually setable, but hardcoded in this proxy app)
Real_t m_hgcoef ;  // hourglass control
Real_t m_ss4o3 ;
Real_t m_qstop ;   // excessive q indicator
Real_t m_monoq_max_slope ;
Real_t m_monoq_limiter_mult ;
Real_t m_qlc_monoq ;  // linear term coef for q
Real_t m_qqc_monoq ;  // quadratic term coef for q
Real_t m_qqc ;
Real_t m_eosvmax ;
Real_t m_eosvmin ;
Real_t m_pmin ;     // pressure floor
Real_t m_emin ;     // energy floor
Real_t m_dvovmax ;  // maximum allowable volume change
Real_t m_refdens ;  // reference density

// Variables to keep track of timestep, simulation time, and cycle
Real_t m_dtcourant ;  // courant constraint
Real_t m_dthydro ;    // volume change constraint
Int_t m_cycle ;       // iteration count for simulation
Real_t m_dtfixed ;    // fixed time increment
Real_t m_time ;       // current time
Real_t m_deltatime ;  // variable time increment
Real_t m_deltatimemultlb ;
Real_t m_deltatimemultub ;
Real_t m_dtmax ;      // maximum allowable time increment
Real_t m_stoptime ;   // end time for simulation

Int_t m_numRanks ;

Index_t m_colLoc ;
Index_t m_rowLoc ;
Index_t m_planeLoc ;
Index_t m_tp ;

Index_t m_sizeX ;
Index_t m_sizeY ;
Index_t m_sizeZ ;
Index_t m_numElem ;
Index_t m_numNode ;

Index_t m_maxPlaneSize ;
Index_t m_maxEdgeSize ;

// OMP hack
Index_t *m_nodeElemCount ;
Index_t *m_nodeElemStart ;
Index_t *m_nodeElemCornerList ;

// Used in setup
Index_t m_rowMin, m_rowMax;
Index_t m_colMin, m_colMax;
Index_t m_planeMin, m_planeMax ;

/******************************************/

/* Work Routines */

/* Advance the simulation clock one cycle: when running with a variable
 * timestep (m_dtfixed <= 0), pick the new dt from the courant and hydro
 * constraints (global MPI_MIN reduction under USE_MPI), limit how fast dt
 * may grow or shrink, clamp to m_dtmax, then update m_time and m_cycle. */
static inline
void TimeIncrement()
{
   Real_t targetdt = m_stoptime - m_time ;

   if ((m_dtfixed <= (Real_t)(0.0)) && (m_cycle != (Int_t)(0))) {
      Real_t ratio ;
      Real_t olddt = m_deltatime ;

      /* This will require a reduction in parallel */
      Real_t gnewdt = (Real_t)(1.0e+20) ;
      Real_t newdt ;
      if (m_dtcourant < gnewdt) {
         gnewdt = m_dtcourant / (Real_t)(2.0) ;
      }
      if (m_dthydro < gnewdt) {
         gnewdt = m_dthydro * (Real_t)(2.0) / (Real_t)(3.0) ;
      }

#if USE_MPI
      MPI_Allreduce(&gnewdt, &newdt, 1,
                    ((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE),
                    MPI_MIN, MPI_COMM_WORLD) ;
#else
      newdt = gnewdt;
#endif

      ratio = newdt / olddt ;
      if (ratio >= (Real_t)(1.0)) {
         if (ratio < m_deltatimemultlb) {
            newdt = olddt ;
         }
         else if (ratio > m_deltatimemultub) {
            newdt = olddt*m_deltatimemultub ;
         }
      }

      if (newdt > m_dtmax) {
         newdt = m_dtmax ;
      }
      m_deltatime = newdt ;
   }

   /* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
   if ((targetdt > m_deltatime) &&
       (targetdt < ((Real_t)(4.0) * m_deltatime / (Real_t)(3.0))) ) {
      targetdt = (Real_t)(2.0) * m_deltatime / (Real_t)(3.0) ;
   }

   if (targetdt < m_deltatime) {
      m_deltatime = targetdt ;
   }

   m_time += m_deltatime ;

   ++m_cycle ;
}

/******************************************/

/* Initialize the diagonal stress terms: sigxx/sigyy/sigzz[i] = -p[i] - q[i]
 * for every element, on the accelerator (OpenACC) or host (OpenMP). */
static inline
void InitStressTermsForElems(Real_t *p, Real_t *q,
                             Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
                             Index_t numElem)
{
   //
   // pull in the stresses appropriate to the hydro integration
   //
   Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(p[numElem], q[numElem], \
    sigxx[numElem],sigyy[numElem],sigzz[numElem]) async(0)
#else
#pragma acc parallel loop present(p[numElem], q[numElem], \
    sigxx[numElem],sigyy[numElem],sigzz[numElem])
#endif
#else
#pragma omp parallel for firstprivate(numElem)
#endif
   for (i = 0 ; i < numElem ; ++i){
      sigxx[i] = sigyy[i] = sigzz[i] = - p[i] - q[i] ;
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}

/******************************************/

/* Compute the shape-function derivative matrix b (3x8) and the jacobian
 * determinant (volume) for one hexahedral element whose nodal coordinates
 * are in the val8 structs x, y, z.  Fully unrolled macro form so the PGI
 * OpenACC compiler keeps all temporaries in registers. */
#define CalcElemShapeFunctionDerivatives_unrolled(x,y,z,b,volume) \
do {\
  Real_t fjxxi, fjxet, fjxze;\
  Real_t fjyxi, fjyet, fjyze;\
  Real_t fjzxi, fjzet, fjzze;\
  Real_t cjxxi, cjxet, cjxze;\
  Real_t cjyxi, cjyet, cjyze;\
  Real_t cjzxi, cjzet, cjzze;\
\
  fjxxi = (Real_t)(.125) * ( (x.v6-x.v0) + (x.v5-x.v3) - (x.v7-x.v1) - (x.v4-x.v2) );\
  fjxet = (Real_t)(.125) * ( (x.v6-x.v0) - (x.v5-x.v3) + (x.v7-x.v1) - (x.v4-x.v2) );\
  fjxze = (Real_t)(.125) * ( (x.v6-x.v0) + (x.v5-x.v3) + (x.v7-x.v1) + (x.v4-x.v2) );\
\
  fjyxi = (Real_t)(.125) * ( (y.v6-y.v0) + (y.v5-y.v3) - (y.v7-y.v1) - (y.v4-y.v2) );\
  fjyet = (Real_t)(.125) * ( (y.v6-y.v0) - (y.v5-y.v3) + (y.v7-y.v1) - (y.v4-y.v2) );\
  fjyze = (Real_t)(.125) * ( (y.v6-y.v0) + (y.v5-y.v3) + (y.v7-y.v1) + (y.v4-y.v2) );\
\
  fjzxi = (Real_t)(.125) * ( (z.v6-z.v0) + (z.v5-z.v3) - (z.v7-z.v1) - (z.v4-z.v2) );\
  fjzet = (Real_t)(.125) * ( (z.v6-z.v0) - (z.v5-z.v3) + (z.v7-z.v1) - (z.v4-z.v2) );\
  fjzze = (Real_t)(.125) * ( (z.v6-z.v0) + (z.v5-z.v3) + (z.v7-z.v1) + (z.v4-z.v2) );\
\
  /* compute cofactors */\
  cjxxi =    (fjyet * fjzze) - (fjzet * fjyze);\
  cjxet =  - (fjyxi * fjzze) + (fjzxi * fjyze);\
  cjxze =    (fjyxi * fjzet) - (fjzxi * fjyet);\
\
  cjyxi =  - (fjxet * fjzze) + (fjzet * fjxze);\
  cjyet =    (fjxxi * fjzze) - (fjzxi * fjxze);\
  cjyze =  - (fjxxi * fjzet) + (fjzxi * fjxet);\
\
  cjzxi =    (fjxet * fjyze) - (fjyet * fjxze);\
  cjzet =  - (fjxxi * fjyze) + (fjyxi * fjxze);\
  cjzze =    (fjxxi * fjyet) - (fjyxi * fjxet);\
\
  /* calculate partials :\
     this need only be done for l = 0,1,2,3   since , by symmetry ,\
     (6,7,4,5) = - (0,1,2,3) .\
  */\
  (b.v0_0) =   -  cjxxi  -  cjxet  -  cjxze;\
  (b.v0_1) =      cjxxi  -  cjxet  -  cjxze;\
  (b.v0_2) =      cjxxi  +  cjxet  -  cjxze;\
  (b.v0_3) =   -  cjxxi  +  cjxet  -  cjxze;\
  (b.v0_4) = -(b.v0_2);\
  (b.v0_5) = -(b.v0_3);\
  (b.v0_6) = -(b.v0_0);\
  (b.v0_7) = -(b.v0_1);\
\
  (b.v1_0) =   -  cjyxi  -  cjyet  -  cjyze;\
  (b.v1_1) =      cjyxi  -  cjyet  -  cjyze;\
  (b.v1_2) =      cjyxi  +  cjyet  -  cjyze;\
  (b.v1_3) =   -  cjyxi  +  cjyet  -  cjyze;\
  (b.v1_4) = -(b.v1_2);\
  (b.v1_5) = -(b.v1_3);\
  (b.v1_6) = -(b.v1_0);\
  (b.v1_7) = -(b.v1_1);\
\
  (b.v2_0) =   -  cjzxi  -  cjzet  -  cjzze;\
  (b.v2_1) =      cjzxi  -  cjzet  -  cjzze;\
  (b.v2_2) =      cjzxi  +  cjzet  -  cjzze;\
  (b.v2_3) =   -  cjzxi  +  cjzet  -  cjzze;\
  (b.v2_4) = -(b.v2_2);\
  (b.v2_5) = -(b.v2_3);\
  (b.v2_6) = -(b.v2_0);\
  (b.v2_7) = -(b.v2_1);\
\
  /* calculate jacobian determinant (volume) */\
  (volume) = (Real_t)(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);\
} while(0)\

/******************************************/

//static inline
#define SumElemFaceNormal(normalX0, normalY0, normalZ0,\
                          normalX1, normalY1, normalZ1,\
                          normalX2, normalY2, normalZ2,\
                          normalX3, normalY3, normalZ3,\
                          x0, y0, z0,\
                          x1, y1, z1,\
                          x2, y2, z2,\
                          x3, y3, z3)\
do {\
   Real_t bisectX0 = (Real_t)(0.5) * ((x3) + (x2) - (x1) - (x0));\
   Real_t bisectY0 = (Real_t)(0.5) * ((y3) + (y2) - (y1) - (y0));\
   Real_t bisectZ0 = (Real_t)(0.5) * ((z3) + (z2) - (z1) - (z0));\
   Real_t bisectX1 = (Real_t)(0.5) * ((x2) + (x1) - (x3) - (x0));\
   Real_t bisectY1 = (Real_t)(0.5) * ((y2) + (y1) - (y3) - (y0));\
   Real_t bisectZ1 = (Real_t)(0.5) * ((z2) + (z1) - (z3) - (z0));\
   Real_t areaX = (Real_t)(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);\
   Real_t areaY = (Real_t)(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);\
   Real_t areaZ = (Real_t)(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);\
\
   (normalX0) += areaX;\
   (normalX1) += areaX;\
   (normalX2) += areaX;\
   (normalX3) += areaX;\
\
   (normalY0) += areaY;\
   (normalY1) += areaY;\
   (normalY2) += areaY;\
   (normalY3) += areaY;\
\
   (normalZ0) += areaZ;\
   (normalZ1) += areaZ;\
   (normalZ2) += areaZ;\
   (normalZ3) += areaZ;\
} while(0)

/******************************************/

/* Accumulate the outward area-weighted normals of all six faces of a hex
 * element into the 3x8 matrix B, starting from zero. */
#define CalcElemNodeNormals_unrolled(B,x,y,z)\
do {\
   (B.v0_0) = (Real_t)(0.0);\
   (B.v1_0) = (Real_t)(0.0);\
   (B.v2_0) = (Real_t)(0.0);\
   (B.v0_1) = (Real_t)(0.0);\
   (B.v1_1) = (Real_t)(0.0);\
   (B.v2_1) = (Real_t)(0.0);\
   (B.v0_2) = (Real_t)(0.0);\
   (B.v1_2) = (Real_t)(0.0);\
   (B.v2_2) = (Real_t)(0.0);\
   (B.v0_3) = (Real_t)(0.0);\
   (B.v1_3) = (Real_t)(0.0);\
   (B.v2_3) = (Real_t)(0.0);\
   (B.v0_4) = (Real_t)(0.0);\
   (B.v1_4) = (Real_t)(0.0);\
   (B.v2_4) = (Real_t)(0.0);\
   (B.v0_5) = (Real_t)(0.0);\
   (B.v1_5) = (Real_t)(0.0);\
   (B.v2_5) = (Real_t)(0.0);\
   (B.v0_6) = (Real_t)(0.0);\
   (B.v1_6) = (Real_t)(0.0);\
   (B.v2_6) = (Real_t)(0.0);\
   (B.v0_7) = (Real_t)(0.0);\
   (B.v1_7) = (Real_t)(0.0);\
   (B.v2_7) = (Real_t)(0.0);\
   /* evaluate face one: nodes 0, 1, 2, 3 */\
   SumElemFaceNormal((B.v0_0), (B.v1_0), (B.v2_0),\
                     (B.v0_1), (B.v1_1), (B.v2_1),\
                     (B.v0_2), (B.v1_2), (B.v2_2),\
                     (B.v0_3), (B.v1_3), (B.v2_3),\
                     (x.v0), (y.v0), (z.v0), (x.v1), (y.v1), (z.v1),\
                     (x.v2), (y.v2), (z.v2), (x.v3), (y.v3), (z.v3));\
   /* evaluate face two: nodes 0, 4, 5, 1 */\
   SumElemFaceNormal((B.v0_0), (B.v1_0), (B.v2_0),\
                     (B.v0_4), (B.v1_4), (B.v2_4),\
                     (B.v0_5), (B.v1_5), (B.v2_5),\
                     (B.v0_1), (B.v1_1), (B.v2_1),\
                     (x.v0), (y.v0), (z.v0), (x.v4), (y.v4), (z.v4),\
                     (x.v5), (y.v5), (z.v5), (x.v1), (y.v1), (z.v1));\
   /* evaluate face three: nodes 1, 5, 6, 2 */\
   SumElemFaceNormal((B.v0_1), (B.v1_1), (B.v2_1),\
                     (B.v0_5), (B.v1_5), (B.v2_5),\
                     (B.v0_6), (B.v1_6), (B.v2_6),\
                     (B.v0_2), (B.v1_2), (B.v2_2),\
                     (x.v1), (y.v1), (z.v1), (x.v5), (y.v5), (z.v5),\
                     (x.v6), (y.v6), (z.v6), (x.v2), (y.v2), (z.v2));\
   /* evaluate face four: nodes 2, 6, 7, 3 */\
   SumElemFaceNormal((B.v0_2), (B.v1_2), (B.v2_2),\
                     (B.v0_6), (B.v1_6), (B.v2_6),\
                     (B.v0_7), (B.v1_7), (B.v2_7),\
                     (B.v0_3), (B.v1_3), (B.v2_3),\
                     (x.v2), (y.v2), (z.v2), (x.v6), (y.v6), (z.v6),\
                     (x.v7), (y.v7), (z.v7), (x.v3), (y.v3), (z.v3));\
   /* evaluate face five: nodes 3, 7, 4, 0 */\
   SumElemFaceNormal((B.v0_3), (B.v1_3), (B.v2_3),\
                     (B.v0_7), (B.v1_7), (B.v2_7),\
                     (B.v0_4), (B.v1_4), (B.v2_4),\
                     (B.v0_0), (B.v1_0), (B.v2_0),\
                     (x.v3), (y.v3), (z.v3), (x.v7), (y.v7), (z.v7),\
                     (x.v4), (y.v4), (z.v4), (x.v0), (y.v0), (z.v0));\
   /* evaluate face six: nodes 4, 7, 6, 5 */\
   SumElemFaceNormal((B.v0_4), (B.v1_4), (B.v2_4),\
                     (B.v0_7), (B.v1_7), (B.v2_7),\
                     (B.v0_6), (B.v1_6), (B.v2_6),\
                     (B.v0_5), (B.v1_5), (B.v2_5),\
                     (x.v4), (y.v4), (z.v4), (x.v7), (y.v7), (z.v7),\
                     (x.v6), (y.v6), (z.v6), (x.v5), (y.v5), (z.v5));\
} while(0)

/******************************************/

/* Integrate the element stresses into nodal forces: per element, compute
 * shape-function derivatives (also yielding determ[k]) and node normals,
 * scatter the per-node force contributions into fx_elem/fy_elem/fz_elem,
 * then gather them per node through the nodeElem* indirection arrays. */
static inline
void IntegrateStressForElems( Index_t *nodelist,
                              Real_t *x, Real_t *y, Real_t *z,
                              Real_t *fx, Real_t *fy, Real_t *fz,
                              Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
                              Index_t *nodeElemCount,
                              Index_t *nodeElemStart,
                              Index_t *nodeElemCornerList,
                              Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
                              Real_t *determ, Index_t numElem, Index_t numNode)
{
  volatile Index_t numElem8 = numElem * 8 ;
  Index_t k;
  // loop over all elements
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(x[numNode], \
                                  y[numNode], \
                                  z[numNode], \
                                  determ[numElem], \
                                  nodelist[numElem8], \
                                  sigxx[numElem], \
                                  sigyy[numElem], \
                                  sigzz[numElem], \
                                  fx_elem[numElem8], \
                                  fy_elem[numElem8], \
                                  fz_elem[numElem8]) async(0)
#else
#pragma acc parallel loop present(x[numNode], \
                                  y[numNode], \
                                  z[numNode], \
                                  determ[numElem], \
                                  nodelist[numElem8], \
                                  sigxx[numElem], \
                                  sigyy[numElem], \
                                  sigzz[numElem], \
                                  fx_elem[numElem8], \
                                  fy_elem[numElem8], \
                                  fz_elem[numElem8])
#endif
#else
#pragma omp parallel for firstprivate(numElem)
#endif
  for(k = 0; k < numElem; ++k )
  {
    const Index_t *elemToNode = &(nodelist[8*k]);
    bmat B; // shape function derivatives
    val8 x_local;
    val8 y_local;
    val8 z_local;
    Index_t gnode;

    // get nodal coordinates from global arrays and copy into local arrays.
    // Loop unrolled because the PGI OpenACC implementation currently stores
    // locally-defined arrays in a global, shared context.  Thus we have to use
    // scalars instead to get them in registers.
    gnode = elemToNode[0];
    x_local.v0 = x[gnode];
    y_local.v0 = y[gnode];
    z_local.v0 = z[gnode];
    gnode = elemToNode[1];
    x_local.v1 = x[gnode];
    y_local.v1 = y[gnode];
    z_local.v1 = z[gnode];
    gnode = elemToNode[2];
    x_local.v2 = x[gnode];
    y_local.v2 = y[gnode];
    z_local.v2 = z[gnode];
    gnode = elemToNode[3];
    x_local.v3 = x[gnode];
    y_local.v3 = y[gnode];
    z_local.v3 = z[gnode];
    gnode = elemToNode[4];
    x_local.v4 = x[gnode];
    y_local.v4 = y[gnode];
    z_local.v4 = z[gnode];
    gnode = elemToNode[5];
    x_local.v5 = x[gnode];
    y_local.v5 = y[gnode];
    z_local.v5 = z[gnode];
    gnode = elemToNode[6];
    x_local.v6 = x[gnode];
    y_local.v6 = y[gnode];
    z_local.v6 = z[gnode];
    gnode = elemToNode[7];
    x_local.v7 = x[gnode];
    y_local.v7 = y[gnode];
    z_local.v7 = z[gnode];

    // Volume calculation involves extra work for numerical consistency
    CalcElemShapeFunctionDerivatives_unrolled(x_local, y_local, z_local,
                                              B, determ[k]);

    CalcElemNodeNormals_unrolled( B, x_local, y_local, z_local );

    // Eliminate thread writing conflicts at the nodes by giving
    // each element its own copy to write to
    // NOTE: This is a manually inlined macro.  Moving it back into macro form
    //       requires some more pointer arithmetic which causes the current
    //       PGI compiler to segfault during compilation (version 13.6-accel).
    fx_elem[k*8 + 0] = -( sigxx[k] * B.v0_0 );
    fy_elem[k*8 + 0] = -( sigyy[k] * B.v1_0 );
    fz_elem[k*8 + 0] = -( sigzz[k] * B.v2_0 );
    fx_elem[k*8 + 1] = -( sigxx[k] * B.v0_1 );
    fy_elem[k*8 + 1] = -( sigyy[k] * B.v1_1 );
    fz_elem[k*8 + 1] = -( sigzz[k] * B.v2_1 );
    fx_elem[k*8 + 2] = -( sigxx[k] * B.v0_2 );
    fy_elem[k*8 + 2] = -( sigyy[k] * B.v1_2 );
    fz_elem[k*8 + 2] = -( sigzz[k] * B.v2_2 );
    fx_elem[k*8 + 3] = -( sigxx[k] * B.v0_3 );
    fy_elem[k*8 + 3] = -( sigyy[k] * B.v1_3 );
    fz_elem[k*8 + 3] = -( sigzz[k] * B.v2_3 );
    fx_elem[k*8 + 4] = -( sigxx[k] * B.v0_4 );
    fy_elem[k*8 + 4] = -( sigyy[k] * B.v1_4 );
    fz_elem[k*8 + 4] = -( sigzz[k] * B.v2_4 );
    fx_elem[k*8 + 5] = -( sigxx[k] * B.v0_5 );
    fy_elem[k*8 + 5] = -( sigyy[k] * B.v1_5 );
    fz_elem[k*8 + 5] = -( sigzz[k] * B.v2_5 );
    fx_elem[k*8 + 6] = -( sigxx[k] * B.v0_6 );
    fy_elem[k*8 + 6] = -( sigyy[k] * B.v1_6 );
    fz_elem[k*8 + 6] = -( sigzz[k] * B.v2_6 );
    fx_elem[k*8 + 7] = -( sigxx[k] * B.v0_7 );
    fy_elem[k*8 + 7] = -( sigyy[k] * B.v1_7 );
    fz_elem[k*8 + 7] = -( sigzz[k] * B.v2_7 );
  }

  // If threaded, then we need to copy the data out of the temporary
  // arrays used above into the final forces field
  /* volatile because otherwise it will be optimized out of the pragma and
     break things. */
  volatile Index_t nCorner = nodeElemStart[numNode-1] + nodeElemCount[numNode-1];
  Index_t gnode;
  // NOTE(review): the present clauses below size fx/fy/fz as [numElem], yet
  // the loop indexes them by node (gnode < numNode) -- verify against the
  // OpenACC data region that allocated these arrays.
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc kernels loop independent vector(256) \
            present(fx_elem[numElem8], \
                    fy_elem[numElem8], \
                    fz_elem[numElem8], \
                    fx[numElem], \
                    fy[numElem], \
                    fz[numElem], \
                    nodeElemCount[numNode], \
                    nodeElemCornerList[nCorner], \
                    nodeElemStart[numNode]) async(0)
#else
#pragma acc kernels loop independent vector(256) \
            present(fx_elem[numElem8], \
                    fy_elem[numElem8], \
                    fz_elem[numElem8], \
                    fx[numElem], \
                    fy[numElem], \
                    fz[numElem], \
                    nodeElemCount[numNode], \
                    nodeElemCornerList[nCorner], \
                    nodeElemStart[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
  for( gnode=0 ; gnode<numNode ; ++gnode )
  {
    Index_t count = nodeElemCount[gnode] ;
    Index_t start = nodeElemStart[gnode] ;
    Real_t fx_tmp = (Real_t)(0.0) ;
    Real_t fy_tmp = (Real_t)(0.0) ;
    Real_t fz_tmp = (Real_t)(0.0) ;
    Index_t i;
    for (i=0 ; i < count ; ++i) {
      Index_t elem = nodeElemCornerList[start+i] ;
      fx_tmp += fx_elem[elem] ;
      fy_tmp += fy_elem[elem] ;
      fz_tmp += fz_elem[elem] ;
    }
    fx[gnode] = fx_tmp ;
    fy[gnode] = fy_tmp ;
    fz[gnode] = fz_tmp ;
  }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}

/******************************************/

//static inline
#define CollectDomainNodesToElemNodes(x, y, z, \
                                      elemToNode, \
                                      elemX, elemY, elemZ) \
do { \
   Index_t nd0i = (elemToNode)[0] ; \
   Index_t nd1i = (elemToNode)[1] ; \
   Index_t nd2i = (elemToNode)[2] ; \
   Index_t nd3i = (elemToNode)[3] ; \
   Index_t nd4i = (elemToNode)[4] ; \
   Index_t nd5i = (elemToNode)[5] ; \
   Index_t nd6i = (elemToNode)[6] ; \
   Index_t nd7i = (elemToNode)[7] ; \
\
   (elemX).v0 = (x)[nd0i]; \
   (elemX).v1 = (x)[nd1i]; \
   (elemX).v2 = (x)[nd2i]; \
   (elemX).v3 = (x)[nd3i]; \
   (elemX).v4 = (x)[nd4i]; \
   (elemX).v5 = (x)[nd5i]; \
   (elemX).v6 = (x)[nd6i]; \
   (elemX).v7 = (x)[nd7i]; \
\
   (elemY).v0 = (y)[nd0i]; \
   (elemY).v1 = (y)[nd1i]; \
   (elemY).v2 = (y)[nd2i]; \
   (elemY).v3 = (y)[nd3i]; \
   (elemY).v4 = (y)[nd4i]; \
   (elemY).v5 = (y)[nd5i]; \
   (elemY).v6 = (y)[nd6i]; \
   (elemY).v7 = (y)[nd7i]; \
\
   (elemZ).v0 = (z)[nd0i]; \
   (elemZ).v1 = (z)[nd1i]; \
   (elemZ).v2 = (z)[nd2i]; \
   (elemZ).v3 = (z)[nd3i]; \
   (elemZ).v4 = (z)[nd4i]; \
   (elemZ).v5 = (z)[nd5i]; \
   (elemZ).v6 = (z)[nd6i]; \
   (elemZ).v7 = (z)[nd7i]; \
} while(0)

/******************************************/

//static inline
#define VoluDer(x0, x1, x2, \
                x3, x4, x5, \
                y0, y1, y2, \
                y3, y4, y5, \
                z0, z1, z2, \
                z3, z4, z5, \
                dvdx, dvdy, dvdz) \
do { \
   const Real_t twelfth = (Real_t)(1.0) / (Real_t)(12.0) ; \
\
   (dvdx) = \
      ((y1) + (y2)) * ((z0) + (z1)) - ((y0) + (y1)) * ((z1) + (z2)) + \
      ((y0) + (y4)) * ((z3) + (z4)) - ((y3) + (y4)) * ((z0) + (z4)) - \
      ((y2) + (y5)) * ((z3) + (z5)) + ((y3) + (y5)) * ((z2) + (z5)); \
   (dvdy) = \
      - ((x1) + (x2)) * ((z0) + (z1)) + ((x0) + (x1)) * ((z1) + (z2)) - \
      ((x0) + (x4)) * ((z3) + (z4)) + ((x3) + (x4)) * ((z0) + (z4)) + \
      ((x2) + (x5)) * ((z3) + (z5)) - ((x3) + (x5)) * ((z2) + (z5)); \
\
   (dvdz) = \
      - ((y1) + (y2)) * ((x0) + (x1)) + ((y0) + (y1)) * ((x1) + (x2)) - \
      ((y0) + (y4)) * ((x3) + (x4)) + ((y3) + (y4)) * ((x0) + (x4)) + \
      ((y2) + (y5)) * ((x3) + (x5)) - ((y3) + (y5)) * ((x2) + (x5)); \
\
   (dvdx) *= twelfth; \
   (dvdy) *= twelfth; \
   (dvdz) *= twelfth; \
} while(0)

/******************************************/

/* Volume derivatives for all eight element nodes, one VoluDer per node. */
//static inline
#define CalcElemVolumeDerivative(dvdx, dvdy, dvdz, \
                                 x, y, z) \
do { \
   VoluDer(x.v1, x.v2, x.v3, x.v4, x.v5, x.v7, \
           y.v1, y.v2, y.v3, y.v4, y.v5, y.v7, \
           z.v1, z.v2, z.v3, z.v4, z.v5, z.v7, \
           dvdx.v0, dvdy.v0, dvdz.v0); \
   VoluDer(x.v0, x.v1, x.v2, x.v7, x.v4, x.v6, \
           y.v0, y.v1, y.v2, y.v7, y.v4, y.v6, \
           z.v0, z.v1, z.v2, z.v7, z.v4, z.v6, \
           dvdx.v3, dvdy.v3, dvdz.v3); \
   VoluDer(x.v3, x.v0, x.v1, x.v6, x.v7, x.v5, \
           y.v3, y.v0, y.v1, y.v6, y.v7, y.v5, \
           z.v3, z.v0, z.v1, z.v6, z.v7, z.v5, \
           dvdx.v2, dvdy.v2, dvdz.v2); \
   VoluDer(x.v2, x.v3, x.v0, x.v5, x.v6, x.v4, \
           y.v2, y.v3, y.v0, y.v5, y.v6, y.v4, \
           z.v2, z.v3, z.v0, z.v5, z.v6, z.v4, \
           dvdx.v1, dvdy.v1, dvdz.v1); \
   VoluDer(x.v7, x.v6, x.v5, x.v0, x.v3, x.v1, \
           y.v7, y.v6, y.v5, y.v0, y.v3, y.v1, \
           z.v7, z.v6, z.v5, z.v0, z.v3, z.v1, \
           dvdx.v4, dvdy.v4, dvdz.v4); \
   VoluDer(x.v4, x.v7, x.v6, x.v1, x.v0, x.v2, \
           y.v4, y.v7, y.v6, y.v1, y.v0, y.v2, \
           z.v4, z.v7, z.v6, z.v1, z.v0, z.v2, \
           dvdx.v5, dvdy.v5, dvdz.v5); \
   VoluDer(x.v5, x.v4, x.v7, x.v2, x.v1, x.v3, \
           y.v5, y.v4, y.v7, y.v2, y.v1, y.v3, \
           z.v5, z.v4, z.v7, z.v2, z.v1, z.v3, \
           dvdx.v6, dvdy.v6, dvdz.v6); \
   VoluDer(x.v6, x.v5, x.v4, x.v3, x.v2, x.v0, \
           y.v6, y.v5, y.v4, y.v3, y.v2, y.v0, \
           z.v6, z.v5, z.v4, z.v3, z.v2, z.v0, \
           dvdx.v7, dvdy.v7, dvdz.v7); \
} while(0)

/******************************************/

/* NOTE(review): the macro below is truncated by the end of this chunk; its
 * continuation lies outside this view and is preserved there. */
//static inline
#define CalcElemFBHourglassForce(xd, yd, zd, \
                                 hourgam, coefficient, \
                                 hgfx, hgfy, hgfz) \
do { \
   val8 hxx; \
   hxx.v0 = hourgam.v0_0 * xd.v0 + hourgam.v1_0 * xd.v1 + \
            hourgam.v2_0 * xd.v2 + hourgam.v3_0 * xd.v3 + \
            hourgam.v4_0 * xd.v4 + hourgam.v5_0 * xd.v5 + \
            hourgam.v6_0 * xd.v6 + hourgam.v7_0 * xd.v7; \
   hxx.v1 = hourgam.v0_1 * xd.v0 + hourgam.v1_1 * xd.v1 + \
            hourgam.v2_1 * xd.v2 + hourgam.v3_1 * xd.v3 + \
            hourgam.v4_1 * xd.v4 + hourgam.v5_1 * xd.v5 + \
            hourgam.v6_1 * xd.v6 + hourgam.v7_1 * xd.v7; \
   hxx.v2 = hourgam.v0_2 * xd.v0 + hourgam.v1_2 * xd.v1 + \
            hourgam.v2_2 * xd.v2 + hourgam.v3_2 * xd.v3 + \
            hourgam.v4_2 * xd.v4 + hourgam.v5_2 * xd.v5 + \
            hourgam.v6_2 * xd.v6 + hourgam.v7_2 * xd.v7; \
   hxx.v3 = hourgam.v0_3 * xd.v0 + hourgam.v1_3 * xd.v1 + \
            hourgam.v2_3 * xd.v2 + hourgam.v3_3 * xd.v3 + \
            hourgam.v4_3 * xd.v4 + hourgam.v5_3 * xd.v5 + \
            hourgam.v6_3 * xd.v6 + hourgam.v7_3 * xd.v7; \
\
   hgfx.v0 = coefficient * \
             (hourgam.v0_0 * hxx.v0 + hourgam.v0_1 * hxx.v1 + \
              hourgam.v0_2 * hxx.v2 + hourgam.v0_3 * hxx.v3); \
   hgfx.v1 = coefficient * \
             (hourgam.v1_0 * hxx.v0 + hourgam.v1_1 * hxx.v1 + \
              hourgam.v1_2 * hxx.v2 + hourgam.v1_3 * hxx.v3); \
   hgfx.v2 = coefficient * \
             (hourgam.v2_0 * hxx.v0 + hourgam.v2_1 * hxx.v1 + \
              hourgam.v2_2 * hxx.v2 + hourgam.v2_3 * hxx.v3); \
   hgfx.v3 = coefficient * \
             (hourgam.v3_0
* hxx.v0 + hourgam.v3_1 * hxx.v1 + \ hourgam.v3_2 * hxx.v2 + hourgam.v3_3 * hxx.v3); \ hgfx.v4 = coefficient * \ (hourgam.v4_0 * hxx.v0 + hourgam.v4_1 * hxx.v1 + \ hourgam.v4_2 * hxx.v2 + hourgam.v4_3 * hxx.v3); \ hgfx.v5 = coefficient * \ (hourgam.v5_0 * hxx.v0 + hourgam.v5_1 * hxx.v1 + \ hourgam.v5_2 * hxx.v2 + hourgam.v5_3 * hxx.v3); \ hgfx.v6 = coefficient * \ (hourgam.v6_0 * hxx.v0 + hourgam.v6_1 * hxx.v1 + \ hourgam.v6_2 * hxx.v2 + hourgam.v6_3 * hxx.v3); \ hgfx.v7 = coefficient * \ (hourgam.v7_0 * hxx.v0 + hourgam.v7_1 * hxx.v1 + \ hourgam.v7_2 * hxx.v2 + hourgam.v7_3 * hxx.v3); \ \ hxx.v0 = hourgam.v0_0 * yd.v0 + hourgam.v1_0 * yd.v1 + \ hourgam.v2_0 * yd.v2 + hourgam.v3_0 * yd.v3 + \ hourgam.v4_0 * yd.v4 + hourgam.v5_0 * yd.v5 + \ hourgam.v6_0 * yd.v6 + hourgam.v7_0 * yd.v7; \ hxx.v1 = hourgam.v0_1 * yd.v0 + hourgam.v1_1 * yd.v1 + \ hourgam.v2_1 * yd.v2 + hourgam.v3_1 * yd.v3 + \ hourgam.v4_1 * yd.v4 + hourgam.v5_1 * yd.v5 + \ hourgam.v6_1 * yd.v6 + hourgam.v7_1 * yd.v7; \ hxx.v2 = hourgam.v0_2 * yd.v0 + hourgam.v1_2 * yd.v1 + \ hourgam.v2_2 * yd.v2 + hourgam.v3_2 * yd.v3 + \ hourgam.v4_2 * yd.v4 + hourgam.v5_2 * yd.v5 + \ hourgam.v6_2 * yd.v6 + hourgam.v7_2 * yd.v7; \ hxx.v3 = hourgam.v0_3 * yd.v0 + hourgam.v1_3 * yd.v1 + \ hourgam.v2_3 * yd.v2 + hourgam.v3_3 * yd.v3 + \ hourgam.v4_3 * yd.v4 + hourgam.v5_3 * yd.v5 + \ hourgam.v6_3 * yd.v6 + hourgam.v7_3 * yd.v7; \ \ hgfy.v0 = coefficient * \ (hourgam.v0_0 * hxx.v0 + hourgam.v0_1 * hxx.v1 + \ hourgam.v0_2 * hxx.v2 + hourgam.v0_3 * hxx.v3); \ hgfy.v1 = coefficient * \ (hourgam.v1_0 * hxx.v0 + hourgam.v1_1 * hxx.v1 + \ hourgam.v1_2 * hxx.v2 + hourgam.v1_3 * hxx.v3); \ hgfy.v2 = coefficient * \ (hourgam.v2_0 * hxx.v0 + hourgam.v2_1 * hxx.v1 + \ hourgam.v2_2 * hxx.v2 + hourgam.v2_3 * hxx.v3); \ hgfy.v3 = coefficient * \ (hourgam.v3_0 * hxx.v0 + hourgam.v3_1 * hxx.v1 + \ hourgam.v3_2 * hxx.v2 + hourgam.v3_3 * hxx.v3); \ hgfy.v4 = coefficient * \ (hourgam.v4_0 * hxx.v0 + hourgam.v4_1 * hxx.v1 + \ hourgam.v4_2 * 
hxx.v2 + hourgam.v4_3 * hxx.v3); \ hgfy.v5 = coefficient * \ (hourgam.v5_0 * hxx.v0 + hourgam.v5_1 * hxx.v1 + \ hourgam.v5_2 * hxx.v2 + hourgam.v5_3 * hxx.v3); \ hgfy.v6 = coefficient * \ (hourgam.v6_0 * hxx.v0 + hourgam.v6_1 * hxx.v1 + \ hourgam.v6_2 * hxx.v2 + hourgam.v6_3 * hxx.v3); \ hgfy.v7 = coefficient * \ (hourgam.v7_0 * hxx.v0 + hourgam.v7_1 * hxx.v1 + \ hourgam.v7_2 * hxx.v2 + hourgam.v7_3 * hxx.v3); \ \ hxx.v0 = hourgam.v0_0 * zd.v0 + hourgam.v1_0 * zd.v1 + \ hourgam.v2_0 * zd.v2 + hourgam.v3_0 * zd.v3 + \ hourgam.v4_0 * zd.v4 + hourgam.v5_0 * zd.v5 + \ hourgam.v6_0 * zd.v6 + hourgam.v7_0 * zd.v7; \ hxx.v1 = hourgam.v0_1 * zd.v0 + hourgam.v1_1 * zd.v1 + \ hourgam.v2_1 * zd.v2 + hourgam.v3_1 * zd.v3 + \ hourgam.v4_1 * zd.v4 + hourgam.v5_1 * zd.v5 + \ hourgam.v6_1 * zd.v6 + hourgam.v7_1 * zd.v7; \ hxx.v2 = hourgam.v0_2 * zd.v0 + hourgam.v1_2 * zd.v1 + \ hourgam.v2_2 * zd.v2 + hourgam.v3_2 * zd.v3 + \ hourgam.v4_2 * zd.v4 + hourgam.v5_2 * zd.v5 + \ hourgam.v6_2 * zd.v6 + hourgam.v7_2 * zd.v7; \ hxx.v3 = hourgam.v0_3 * zd.v0 + hourgam.v1_3 * zd.v1 + \ hourgam.v2_3 * zd.v2 + hourgam.v3_3 * zd.v3 + \ hourgam.v4_3 * zd.v4 + hourgam.v5_3 * zd.v5 + \ hourgam.v6_3 * zd.v6 + hourgam.v7_3 * zd.v7; \ \ hgfz.v0 = coefficient * \ (hourgam.v0_0 * hxx.v0 + hourgam.v0_1 * hxx.v1 + \ hourgam.v0_2 * hxx.v2 + hourgam.v0_3 * hxx.v3); \ hgfz.v1 = coefficient * \ (hourgam.v1_0 * hxx.v0 + hourgam.v1_1 * hxx.v1 + \ hourgam.v1_2 * hxx.v2 + hourgam.v1_3 * hxx.v3); \ hgfz.v2 = coefficient * \ (hourgam.v2_0 * hxx.v0 + hourgam.v2_1 * hxx.v1 + \ hourgam.v2_2 * hxx.v2 + hourgam.v2_3 * hxx.v3); \ hgfz.v3 = coefficient * \ (hourgam.v3_0 * hxx.v0 + hourgam.v3_1 * hxx.v1 + \ hourgam.v3_2 * hxx.v2 + hourgam.v3_3 * hxx.v3); \ hgfz.v4 = coefficient * \ (hourgam.v4_0 * hxx.v0 + hourgam.v4_1 * hxx.v1 + \ hourgam.v4_2 * hxx.v2 + hourgam.v4_3 * hxx.v3); \ hgfz.v5 = coefficient * \ (hourgam.v5_0 * hxx.v0 + hourgam.v5_1 * hxx.v1 + \ hourgam.v5_2 * hxx.v2 + hourgam.v5_3 * hxx.v3); \ hgfz.v6 = 
coefficient * \ (hourgam.v6_0 * hxx.v0 + hourgam.v6_1 * hxx.v1 + \ hourgam.v6_2 * hxx.v2 + hourgam.v6_3 * hxx.v3); \ hgfz.v7 = coefficient * \ (hourgam.v7_0 * hxx.v0 + hourgam.v7_1 * hxx.v1 + \ hourgam.v7_2 * hxx.v2 + hourgam.v7_3 * hxx.v3); \ } while(0) /******************************************/ #define FillHourGam \ do { \ /* i = 0 */ \ Real_t hourmodx = \ x8n[i3] * gamma[0][0] + x8n[i3+1] * gamma[0][1] + \ x8n[i3+2] * gamma[0][2] + x8n[i3+3] * gamma[0][3] + \ x8n[i3+4] * gamma[0][4] + x8n[i3+5] * gamma[0][5] + \ x8n[i3+6] * gamma[0][6] + x8n[i3+7] * gamma[0][7]; \ \ Real_t hourmody = \ y8n[i3] * gamma[0][0] + y8n[i3+1] * gamma[0][1] + \ y8n[i3+2] * gamma[0][2] + y8n[i3+3] * gamma[0][3] + \ y8n[i3+4] * gamma[0][4] + y8n[i3+5] * gamma[0][5] + \ y8n[i3+6] * gamma[0][6] + y8n[i3+7] * gamma[0][7]; \ \ Real_t hourmodz = \ z8n[i3] * gamma[0][0] + z8n[i3+1] * gamma[0][1] + \ z8n[i3+2] * gamma[0][2] + z8n[i3+3] * gamma[0][3] + \ z8n[i3+4] * gamma[0][4] + z8n[i3+5] * gamma[0][5] + \ z8n[i3+6] * gamma[0][6] + z8n[i3+7] * gamma[0][7]; \ \ hourgam.v0_0 = gamma[0][0] - volinv*(dvdx[i3 ] * hourmodx + \ dvdy[i3 ] * hourmody + \ dvdz[i3 ] * hourmodz ); \ \ hourgam.v1_0 = gamma[0][1] - volinv*(dvdx[i3+1] * hourmodx + \ dvdy[i3+1] * hourmody + \ dvdz[i3+1] * hourmodz ); \ \ hourgam.v2_0 = gamma[0][2] - volinv*(dvdx[i3+2] * hourmodx + \ dvdy[i3+2] * hourmody + \ dvdz[i3+2] * hourmodz ); \ \ hourgam.v3_0 = gamma[0][3] - volinv*(dvdx[i3+3] * hourmodx + \ dvdy[i3+3] * hourmody + \ dvdz[i3+3] * hourmodz ); \ \ hourgam.v4_0 = gamma[0][4] - volinv*(dvdx[i3+4] * hourmodx + \ dvdy[i3+4] * hourmody + \ dvdz[i3+4] * hourmodz ); \ \ hourgam.v5_0 = gamma[0][5] - volinv*(dvdx[i3+5] * hourmodx + \ dvdy[i3+5] * hourmody + \ dvdz[i3+5] * hourmodz ); \ \ hourgam.v6_0 = gamma[0][6] - volinv*(dvdx[i3+6] * hourmodx + \ dvdy[i3+6] * hourmody + \ dvdz[i3+6] * hourmodz ); \ \ hourgam.v7_0 = gamma[0][7] - volinv*(dvdx[i3+7] * hourmodx + \ dvdy[i3+7] * hourmody + \ dvdz[i3+7] * hourmodz ); \ /* i = 1 */ 
\ hourmodx = \ x8n[i3] * gamma[1][0] + x8n[i3+1] * gamma[1][1] + \ x8n[i3+2] * gamma[1][2] + x8n[i3+3] * gamma[1][3] + \ x8n[i3+4] * gamma[1][4] + x8n[i3+5] * gamma[1][5] + \ x8n[i3+6] * gamma[1][6] + x8n[i3+7] * gamma[1][7]; \ \ hourmody = \ y8n[i3] * gamma[1][0] + y8n[i3+1] * gamma[1][1] + \ y8n[i3+2] * gamma[1][2] + y8n[i3+3] * gamma[1][3] + \ y8n[i3+4] * gamma[1][4] + y8n[i3+5] * gamma[1][5] + \ y8n[i3+6] * gamma[1][6] + y8n[i3+7] * gamma[1][7]; \ \ hourmodz = \ z8n[i3] * gamma[1][0] + z8n[i3+1] * gamma[1][1] + \ z8n[i3+2] * gamma[1][2] + z8n[i3+3] * gamma[1][3] + \ z8n[i3+4] * gamma[1][4] + z8n[i3+5] * gamma[1][5] + \ z8n[i3+6] * gamma[1][6] + z8n[i3+7] * gamma[1][7]; \ \ hourgam.v0_1 = gamma[1][0] - volinv*(dvdx[i3 ] * hourmodx + \ dvdy[i3 ] * hourmody + \ dvdz[i3 ] * hourmodz ); \ \ hourgam.v1_1 = gamma[1][1] - volinv*(dvdx[i3+1] * hourmodx + \ dvdy[i3+1] * hourmody + \ dvdz[i3+1] * hourmodz ); \ \ hourgam.v2_1 = gamma[1][2] - volinv*(dvdx[i3+2] * hourmodx + \ dvdy[i3+2] * hourmody + \ dvdz[i3+2] * hourmodz ); \ \ hourgam.v3_1 = gamma[1][3] - volinv*(dvdx[i3+3] * hourmodx + \ dvdy[i3+3] * hourmody + \ dvdz[i3+3] * hourmodz ); \ \ hourgam.v4_1 = gamma[1][4] - volinv*(dvdx[i3+4] * hourmodx + \ dvdy[i3+4] * hourmody + \ dvdz[i3+4] * hourmodz ); \ \ hourgam.v5_1 = gamma[1][5] - volinv*(dvdx[i3+5] * hourmodx + \ dvdy[i3+5] * hourmody + \ dvdz[i3+5] * hourmodz ); \ \ hourgam.v6_1 = gamma[1][6] - volinv*(dvdx[i3+6] * hourmodx + \ dvdy[i3+6] * hourmody + \ dvdz[i3+6] * hourmodz ); \ \ hourgam.v7_1 = gamma[1][7] - volinv*(dvdx[i3+7] * hourmodx + \ dvdy[i3+7] * hourmody + \ dvdz[i3+7] * hourmodz ); \ /* i = 2 */ \ hourmodx = \ x8n[i3] * gamma[2][0] + x8n[i3+1] * gamma[2][1] + \ x8n[i3+2] * gamma[2][2] + x8n[i3+3] * gamma[2][3] + \ x8n[i3+4] * gamma[2][4] + x8n[i3+5] * gamma[2][5] + \ x8n[i3+6] * gamma[2][6] + x8n[i3+7] * gamma[2][7]; \ \ hourmody = \ y8n[i3] * gamma[2][0] + y8n[i3+1] * gamma[2][1] + \ y8n[i3+2] * gamma[2][2] + y8n[i3+3] * gamma[2][3] + \ y8n[i3+4] * 
gamma[2][4] + y8n[i3+5] * gamma[2][5] + \ y8n[i3+6] * gamma[2][6] + y8n[i3+7] * gamma[2][7]; \ \ hourmodz = \ z8n[i3] * gamma[2][0] + z8n[i3+1] * gamma[2][1] + \ z8n[i3+2] * gamma[2][2] + z8n[i3+3] * gamma[2][3] + \ z8n[i3+4] * gamma[2][4] + z8n[i3+5] * gamma[2][5] + \ z8n[i3+6] * gamma[2][6] + z8n[i3+7] * gamma[2][7]; \ \ hourgam.v0_2 = gamma[2][0] - volinv*(dvdx[i3 ] * hourmodx + \ dvdy[i3 ] * hourmody + \ dvdz[i3 ] * hourmodz ); \ \ hourgam.v1_2 = gamma[2][1] - volinv*(dvdx[i3+1] * hourmodx + \ dvdy[i3+1] * hourmody + \ dvdz[i3+1] * hourmodz ); \ \ hourgam.v2_2 = gamma[2][2] - volinv*(dvdx[i3+2] * hourmodx + \ dvdy[i3+2] * hourmody + \ dvdz[i3+2] * hourmodz ); \ \ hourgam.v3_2 = gamma[2][3] - volinv*(dvdx[i3+3] * hourmodx + \ dvdy[i3+3] * hourmody + \ dvdz[i3+3] * hourmodz ); \ \ hourgam.v4_2 = gamma[2][4] - volinv*(dvdx[i3+4] * hourmodx + \ dvdy[i3+4] * hourmody + \ dvdz[i3+4] * hourmodz ); \ \ hourgam.v5_2 = gamma[2][5] - volinv*(dvdx[i3+5] * hourmodx + \ dvdy[i3+5] * hourmody + \ dvdz[i3+5] * hourmodz ); \ \ hourgam.v6_2 = gamma[2][6] - volinv*(dvdx[i3+6] * hourmodx + \ dvdy[i3+6] * hourmody + \ dvdz[i3+6] * hourmodz ); \ \ hourgam.v7_2 = gamma[2][7] - volinv*(dvdx[i3+7] * hourmodx + \ dvdy[i3+7] * hourmody + \ dvdz[i3+7] * hourmodz ); \ /* i = 3 */ \ hourmodx = \ x8n[i3] * gamma[3][0] + x8n[i3+1] * gamma[3][1] + \ x8n[i3+2] * gamma[3][2] + x8n[i3+3] * gamma[3][3] + \ x8n[i3+4] * gamma[3][4] + x8n[i3+5] * gamma[3][5] + \ x8n[i3+6] * gamma[3][6] + x8n[i3+7] * gamma[3][7]; \ \ hourmody = \ y8n[i3] * gamma[3][0] + y8n[i3+1] * gamma[3][1] + \ y8n[i3+2] * gamma[3][2] + y8n[i3+3] * gamma[3][3] + \ y8n[i3+4] * gamma[3][4] + y8n[i3+5] * gamma[3][5] + \ y8n[i3+6] * gamma[3][6] + y8n[i3+7] * gamma[3][7]; \ \ hourmodz = \ z8n[i3] * gamma[3][0] + z8n[i3+1] * gamma[3][1] + \ z8n[i3+2] * gamma[3][2] + z8n[i3+3] * gamma[3][3] + \ z8n[i3+4] * gamma[3][4] + z8n[i3+5] * gamma[3][5] + \ z8n[i3+6] * gamma[3][6] + z8n[i3+7] * gamma[3][7]; \ \ hourgam.v0_3 = gamma[3][0] - 
volinv*(dvdx[i3 ] * hourmodx + \ dvdy[i3 ] * hourmody + \ dvdz[i3 ] * hourmodz ); \ \ hourgam.v1_3 = gamma[3][1] - volinv*(dvdx[i3+1] * hourmodx + \ dvdy[i3+1] * hourmody + \ dvdz[i3+1] * hourmodz ); \ \ hourgam.v2_3 = gamma[3][2] - volinv*(dvdx[i3+2] * hourmodx + \ dvdy[i3+2] * hourmody + \ dvdz[i3+2] * hourmodz ); \ \ hourgam.v3_3 = gamma[3][3] - volinv*(dvdx[i3+3] * hourmodx + \ dvdy[i3+3] * hourmody + \ dvdz[i3+3] * hourmodz ); \ \ hourgam.v4_3 = gamma[3][4] - volinv*(dvdx[i3+4] * hourmodx + \ dvdy[i3+4] * hourmody + \ dvdz[i3+4] * hourmodz ); \ \ hourgam.v5_3 = gamma[3][5] - volinv*(dvdx[i3+5] * hourmodx + \ dvdy[i3+5] * hourmody + \ dvdz[i3+5] * hourmodz ); \ \ hourgam.v6_3 = gamma[3][6] - volinv*(dvdx[i3+6] * hourmodx + \ dvdy[i3+6] * hourmody + \ dvdz[i3+6] * hourmodz ); \ \ hourgam.v7_3 = gamma[3][7] - volinv*(dvdx[i3+7] * hourmodx + \ dvdy[i3+7] * hourmody + \ dvdz[i3+7] * hourmodz ); \ } while(0) static inline void CalcFBHourglassForceForElems( Index_t *nodelist, Index_t *nodeElemCount, Index_t *nodeElemStart, Index_t *nodeElemCornerList, Real_t *determ, Real_t *fx, Real_t *fy, Real_t *fz, Real_t *x8n, Real_t *y8n, Real_t *z8n, Real_t *dvdx, Real_t *dvdy, Real_t *dvdz, Real_t hourg, Index_t numElem, Index_t numNode) { #if !defined(_OPENACC) && defined(_OPENMP) Index_t numthreads = omp_get_max_threads(); #else Index_t numthreads = 1; #endif /************************************************* * * FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass * force. 
* *************************************************/ Index_t numElem8 = numElem * 8 ; Real_t *ss = m_ss; Real_t *elemMass = m_elemMass; Real_t *xd = m_xd; Real_t *yd = m_yd; Real_t *zd = m_zd; Real_t *fx_elem = m_fx_elem; Real_t *fy_elem = m_fy_elem; Real_t *fz_elem = m_fz_elem; #ifdef USE_UNIFIEDMEM Real_t (*gamma)[8] = (Real_t (*)[8])acc_create_unified(NULL, sizeof(Real_t)*4*8); #else Real_t gamma[4][8]; #endif gamma[0][0] = (Real_t)( 1.); gamma[0][1] = (Real_t)( 1.); gamma[0][2] = (Real_t)(-1.); gamma[0][3] = (Real_t)(-1.); gamma[0][4] = (Real_t)(-1.); gamma[0][5] = (Real_t)(-1.); gamma[0][6] = (Real_t)( 1.); gamma[0][7] = (Real_t)( 1.); gamma[1][0] = (Real_t)( 1.); gamma[1][1] = (Real_t)(-1.); gamma[1][2] = (Real_t)(-1.); gamma[1][3] = (Real_t)( 1.); gamma[1][4] = (Real_t)(-1.); gamma[1][5] = (Real_t)( 1.); gamma[1][6] = (Real_t)( 1.); gamma[1][7] = (Real_t)(-1.); gamma[2][0] = (Real_t)( 1.); gamma[2][1] = (Real_t)(-1.); gamma[2][2] = (Real_t)( 1.); gamma[2][3] = (Real_t)(-1.); gamma[2][4] = (Real_t)( 1.); gamma[2][5] = (Real_t)(-1.); gamma[2][6] = (Real_t)( 1.); gamma[2][7] = (Real_t)(-1.); gamma[3][0] = (Real_t)(-1.); gamma[3][1] = (Real_t)( 1.); gamma[3][2] = (Real_t)(-1.); gamma[3][3] = (Real_t)( 1.); gamma[3][4] = (Real_t)( 1.); gamma[3][5] = (Real_t)(-1.); gamma[3][6] = (Real_t)( 1.); gamma[3][7] = (Real_t)(-1.); Index_t i2; /*************************************************/ /* compute the hourglass modes */ #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc kernels copyin(gamma[4][8]) \ present(fx_elem[numElem8], \ fy_elem[numElem8], \ fz_elem[numElem8], \ xd[numNode], \ yd[numNode], \ zd[numNode], \ dvdx[numElem8], \ dvdy[numElem8], \ dvdz[numElem8], \ x8n[numElem8], \ y8n[numElem8], \ z8n[numElem8], \ nodelist[numElem8],\ determ[numElem], \ ss[numElem], \ elemMass[numElem]) async(0) #else #pragma acc kernels copyin(gamma[4][8]) \ present(fx_elem[numElem8], \ fy_elem[numElem8], \ fz_elem[numElem8], \ xd[numNode], \ yd[numNode], \ zd[numNode], \ 
dvdx[numElem8], \ dvdy[numElem8], \ dvdz[numElem8], \ x8n[numElem8], \ y8n[numElem8], \ z8n[numElem8], \ nodelist[numElem8],\ determ[numElem], \ ss[numElem], \ elemMass[numElem]) #endif #pragma acc cache(gamma) #pragma acc loop independent #else #pragma omp parallel for firstprivate(numElem, hourg) #endif for(i2=0;i2<numElem;++i2){ val8 hgfx, hgfy, hgfz; Real_t coefficient; //Real_t hourgam[8][4]; hourmat hourgam; val8 xd1, yd1, zd1; const Index_t *elemToNode = &nodelist[i2*8]; Index_t i3=8*i2; Real_t volinv=(Real_t)(1.0)/determ[i2]; Real_t ss1, mass1, volume13 ; /* Large macro of unrolled loop */ FillHourGam; /* compute forces */ /* store forces into h arrays (force arrays) */ ss1 = ss[i2]; mass1 = elemMass[i2]; volume13 = pow(determ[i2], (1.0 / 3.0)); Index_t n0si2 = elemToNode[0]; Index_t n1si2 = elemToNode[1]; Index_t n2si2 = elemToNode[2]; Index_t n3si2 = elemToNode[3]; Index_t n4si2 = elemToNode[4]; Index_t n5si2 = elemToNode[5]; Index_t n6si2 = elemToNode[6]; Index_t n7si2 = elemToNode[7]; xd1.v0 = xd[n0si2]; xd1.v1 = xd[n1si2]; xd1.v2 = xd[n2si2]; xd1.v3 = xd[n3si2]; xd1.v4 = xd[n4si2]; xd1.v5 = xd[n5si2]; xd1.v6 = xd[n6si2]; xd1.v7 = xd[n7si2]; yd1.v0 = yd[n0si2]; yd1.v1 = yd[n1si2]; yd1.v2 = yd[n2si2]; yd1.v3 = yd[n3si2]; yd1.v4 = yd[n4si2]; yd1.v5 = yd[n5si2]; yd1.v6 = yd[n6si2]; yd1.v7 = yd[n7si2]; zd1.v0 = zd[n0si2]; zd1.v1 = zd[n1si2]; zd1.v2 = zd[n2si2]; zd1.v3 = zd[n3si2]; zd1.v4 = zd[n4si2]; zd1.v5 = zd[n5si2]; zd1.v6 = zd[n6si2]; zd1.v7 = zd[n7si2]; coefficient = - hourg * (Real_t)(0.01) * ss1 * mass1 / volume13; CalcElemFBHourglassForce(xd1,yd1,zd1, hourgam, coefficient, hgfx, hgfy, hgfz); // With the threaded version, we write into local arrays per elem // so we don't have to worry about race conditions fx_elem[i3 + 0] = hgfx.v0; fx_elem[i3 + 1] = hgfx.v1; fx_elem[i3 + 2] = hgfx.v2; fx_elem[i3 + 3] = hgfx.v3; fx_elem[i3 + 4] = hgfx.v4; fx_elem[i3 + 5] = hgfx.v5; fx_elem[i3 + 6] = hgfx.v6; fx_elem[i3 + 7] = hgfx.v7; fy_elem[i3 + 0] = hgfy.v0; 
fy_elem[i3 + 1] = hgfy.v1; fy_elem[i3 + 2] = hgfy.v2; fy_elem[i3 + 3] = hgfy.v3; fy_elem[i3 + 4] = hgfy.v4; fy_elem[i3 + 5] = hgfy.v5; fy_elem[i3 + 6] = hgfy.v6; fy_elem[i3 + 7] = hgfy.v7; fz_elem[i3 + 0] = hgfz.v0; fz_elem[i3 + 1] = hgfz.v1; fz_elem[i3 + 2] = hgfz.v2; fz_elem[i3 + 3] = hgfz.v3; fz_elem[i3 + 4] = hgfz.v4; fz_elem[i3 + 5] = hgfz.v5; fz_elem[i3 + 6] = hgfz.v6; fz_elem[i3 + 7] = hgfz.v7; } // end accelerated for /* volatile because otherwise it will be optimized out of the pragma and break things. */ volatile Index_t nCorner = nodeElemStart[numNode-1] + nodeElemCount[numNode-1]; Index_t gnode; // Collect the data from the local arrays into the final force arrays #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc kernels loop independent vector(256) \ present(nodeElemCount[numNode], \ nodeElemStart[numNode], \ nodeElemCornerList[nCorner], \ fx_elem[numElem8], \ fy_elem[numElem8], \ fz_elem[numElem8], \ fx[numNode], \ fy[numNode], \ fz[numNode]) async(0) #else #pragma acc kernels loop independent vector(256) \ present(nodeElemCount[numNode], \ nodeElemStart[numNode], \ nodeElemCornerList[nCorner], \ fx_elem[numElem8], \ fy_elem[numElem8], \ fz_elem[numElem8], \ fx[numNode], \ fy[numNode], \ fz[numNode]) #endif #else #pragma omp parallel for firstprivate(numNode) #endif for( gnode=0 ; gnode<numNode ; ++gnode ) { Index_t count = nodeElemCount[gnode] ; Index_t start = nodeElemStart[gnode] ; Real_t fx_tmp = (Real_t)(0.0) ; Real_t fy_tmp = (Real_t)(0.0) ; Real_t fz_tmp = (Real_t)(0.0) ; Index_t i; for (i=0 ; i < count ; ++i) { Index_t elem = nodeElemCornerList[start+i] ; fx_tmp += fx_elem[elem] ; fy_tmp += fy_elem[elem] ; fz_tmp += fz_elem[elem] ; } fx[gnode] += fx_tmp ; fy[gnode] += fy_tmp ; fz[gnode] += fz_tmp ; } #ifdef USE_ASYNC #pragma acc wait(0) #endif #ifdef USE_UNIFIEDMEM acc_delete_unified(gamma, 0); #endif } /******************************************/ #define LoadTmpStorageFBControl(dvdx, dvdy, dvdz, \ pfx, pfy, pfz, \ x8n, y8n, z8n, \ x1, y1, z1, \ i) 
\ do { \ Index_t jj; \ jj = 8*(i)+0; \ (dvdx)[jj] = (pfx).v0; \ (dvdy)[jj] = (pfy).v0; \ (dvdz)[jj] = (pfz).v0; \ (x8n)[jj] = (x1).v0; \ (y8n)[jj] = (y1).v0; \ (z8n)[jj] = (z1).v0; \ jj = 8*(i)+1; \ (dvdx)[jj] = (pfx).v1; \ (dvdy)[jj] = (pfy).v1; \ (dvdz)[jj] = (pfz).v1; \ (x8n)[jj] = (x1).v1; \ (y8n)[jj] = (y1).v1; \ (z8n)[jj] = (z1).v1; \ jj = 8*(i)+2; \ (dvdx)[jj] = (pfx).v2; \ (dvdy)[jj] = (pfy).v2; \ (dvdz)[jj] = (pfz).v2; \ (x8n)[jj] = (x1).v2; \ (y8n)[jj] = (y1).v2; \ (z8n)[jj] = (z1).v2; \ jj = 8*(i)+3; \ (dvdx)[jj] = (pfx).v3; \ (dvdy)[jj] = (pfy).v3; \ (dvdz)[jj] = (pfz).v3; \ (x8n)[jj] = (x1).v3; \ (y8n)[jj] = (y1).v3; \ (z8n)[jj] = (z1).v3; \ jj = 8*(i)+4; \ (dvdx)[jj] = (pfx).v4; \ (dvdy)[jj] = (pfy).v4; \ (dvdz)[jj] = (pfz).v4; \ (x8n)[jj] = (x1).v4; \ (y8n)[jj] = (y1).v4; \ (z8n)[jj] = (z1).v4; \ jj = 8*(i)+5; \ (dvdx)[jj] = (pfx).v5; \ (dvdy)[jj] = (pfy).v5; \ (dvdz)[jj] = (pfz).v5; \ (x8n)[jj] = (x1).v5; \ (y8n)[jj] = (y1).v5; \ (z8n)[jj] = (z1).v5; \ jj = 8*(i)+6; \ (dvdx)[jj] = (pfx).v6; \ (dvdy)[jj] = (pfy).v6; \ (dvdz)[jj] = (pfz).v6; \ (x8n)[jj] = (x1).v6; \ (y8n)[jj] = (y1).v6; \ (z8n)[jj] = (z1).v6; \ jj = 8*(i)+7; \ (dvdx)[jj] = (pfx).v7; \ (dvdy)[jj] = (pfy).v7; \ (dvdz)[jj] = (pfz).v7; \ (x8n)[jj] = (x1).v7; \ (y8n)[jj] = (y1).v7; \ (z8n)[jj] = (z1).v7; \ } while(0) \ static inline void CalcHourglassControlForElems( Real_t *x, Real_t *y, Real_t *z, Real_t *fx, Real_t *fy, Real_t *fz, Real_t determ[], Real_t hgcoef, Index_t *nodelist, Index_t *nodeElemCount, Index_t *nodeElemStart, Index_t *nodeElemCornerList) { Index_t numElem = m_numElem ; volatile Index_t numElem8 = numElem * 8 ; volatile Index_t numNode = m_numNode; Real_t *dvdx = m_dvdx; Real_t *dvdy = m_dvdy; Real_t *dvdz = m_dvdz; Real_t *x8n = m_x8n; Real_t *y8n = m_y8n; Real_t *z8n = m_z8n; Real_t *volo = m_volo; Real_t *v = m_v; int abort = 0; /* start loop over elements */ Index_t i; #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc parallel loop present(dvdx[numElem8], \ 
dvdy[numElem8], \ dvdz[numElem8], \ x8n[numElem8], \ y8n[numElem8], \ z8n[numElem8], \ x[numNode], \ y[numNode], \ z[numNode], \ volo[numElem], \ v[numElem], \ determ[numElem], \ nodelist[numElem8]) \ reduction(max: abort) async(0) #else #pragma acc parallel loop present(dvdx[numElem8], \ dvdy[numElem8], \ dvdz[numElem8], \ x8n[numElem8], \ y8n[numElem8], \ z8n[numElem8], \ x[numNode], \ y[numNode], \ z[numNode], \ volo[numElem], \ v[numElem], \ determ[numElem], \ nodelist[numElem8]) \ reduction(max: abort) #endif #else #pragma omp parallel for firstprivate(numElem) reduction(max: abort) #endif for (i=0 ; i<numElem ; ++i){ val8 x1; val8 y1; val8 z1; val8 pfx; val8 pfy; val8 pfz; Index_t* elemToNode = &nodelist[i*8]; CollectDomainNodesToElemNodes(x, y, z, elemToNode, x1, y1, z1); CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1); /* load into temporary storage for FB Hour Glass control */ LoadTmpStorageFBControl(dvdx, dvdy, dvdz, pfx, pfy, pfz, x8n, y8n, z8n, x1, y1, z1, i); determ[i] = volo[i] * v[i]; /* Do a check for negative volumes */ if ( v[i] <= (Real_t)(0.0) ) { abort = 1; } } // end for #ifdef USE_ASYNC #pragma acc wait(0) #endif if(abort) { #if USE_MPI MPI_Abort(MPI_COMM_WORLD, VolumeError) ; #else exit(VolumeError); #endif } if ( hgcoef > (Real_t)(0.) 
) {
      /* Hourglass coefficient is positive: add the Flanagan-Belytschko
         anti-hourglass forces on top of the stress-integration forces. */
      CalcFBHourglassForceForElems(nodelist,
                                   nodeElemCount,
                                   nodeElemStart,
                                   nodeElemCornerList,
                                   determ,
                                   fx, fy, fz,
                                   x8n, y8n, z8n,
                                   dvdx, dvdy, dvdz,
                                   hgcoef, numElem, numNode );
   }

   return ;
}

/******************************************/

/* Compute the total nodal force field (fx,fy,fz) for the current step:
   stress-term initialization, stress integration, a negative-volume
   sanity check, and hourglass control.  Reads mesh-wide state from the
   m_* globals defined elsewhere in this file. */
static inline
void CalcVolumeForceForElems(Real_t *fx, Real_t *fy, Real_t *fz)
{
   Index_t numElem = m_numElem ;
   Index_t numNode = m_numNode;
   if (numElem != 0) {
      Real_t  hgcoef = m_hgcoef ;
      /* Local aliases for device-resident global arrays (m_*). */
      Real_t *sigxx = m_sigxx;
      Real_t *sigyy = m_sigyy;
      Real_t *sigzz = m_sigzz;
      Real_t *determ = m_determ;
      Real_t *p = m_p;
      Real_t *q = m_q;
      Real_t *x = m_x;
      Real_t *y = m_y;
      Real_t *z = m_z;
      Index_t *nodelist = m_nodelist;
      Index_t *nodeElemCount = m_nodeElemCount;
      Index_t *nodeElemStart = m_nodeElemStart;
      Index_t *nodeElemCornerList = m_nodeElemCornerList;

      /* Sum contributions to total stress tensor */
      InitStressTermsForElems(p, q, sigxx, sigyy, sigzz, numElem);

      // call elemlib stress integration loop to produce nodal forces from
      // material stresses.
      IntegrateStressForElems( nodelist,
                               x, y, z,
                               fx, fy, fz,
                               m_fx_elem, m_fy_elem, m_fz_elem,
                               nodeElemCount,
                               nodeElemStart,
                               nodeElemCornerList,
                               sigxx, sigyy, sigzz, determ, numElem,
                               numNode);

      /* Check for negative element volume; max-reduction lets the whole
         scan run in parallel and still report any failure. */
      int abort = 0;
      Index_t k;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(determ[numElem]) \
                          reduction(max: abort) async(0)
#else
#pragma acc parallel loop present(determ[numElem]) \
                          reduction(max: abort)
#endif
#else
#pragma omp parallel for reduction(max:abort) firstprivate(numElem)
#endif
      for(k = 0; k < numElem; ++k) {
         if(determ[k] <= (Real_t)(0.0)) {
            abort = 1;
         }
      }

#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

      if(abort == 1) {
#if USE_MPI
         MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
         exit(VolumeError);
#endif
      }

      CalcHourglassControlForElems(x, y, z,
                                   fx, fy, fz,
                                   determ, hgcoef,
                                   nodelist,
                                   nodeElemCount,
                                   nodeElemStart,
                                   nodeElemCornerList);
   }
}

/******************************************/

/* Zero the force field, compute volume forces, then (under MPI) exchange
   and sum boundary-node forces with neighbor ranks. */
static inline void CalcForceForNodes()
{
  Index_t numNode = m_numNode ;

#if USE_MPI
  /* Post the receives before computing so communication can overlap. */
  CommRecv(MSG_COMM_SBN, 3,
           m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
           true, false) ;
#endif

  Real_t *fx = m_fx;
  Real_t *fy = m_fy;
  Real_t *fz = m_fz;

  Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(fx[numNode], \
                                  fy[numNode], \
                                  fz[numNode]) async(0)
#else
#pragma acc parallel loop present(fx[numNode], \
                                  fy[numNode], \
                                  fz[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
  for (i=0; i<numNode; ++i) {
    fx[i] = (Real_t)(0.0);
    fy[i] = (Real_t)(0.0);
    fz[i] = (Real_t)(0.0);
  }

#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

  /* Calcforce calls partial, force, hourq */
  CalcVolumeForceForElems(fx, fy, fz) ;

#if USE_MPI
  Real_t *fieldData[3] ;

#pragma acc data present(fx[numNode], \
                         fy[numNode], \
                         fz[numNode])
  {
  /* Bring forces to the host for MPI, exchange, then push the summed
     result back to the device. */
#pragma acc update host(fx[numNode], \
                        fy[numNode], \
                        fz[numNode])

  fieldData[0] = fx;
  fieldData[1] = fy;
  fieldData[2] = fz;

  CommSend(MSG_COMM_SBN, 3, fieldData,
           m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
           true, false) ;
  CommSBN(3, fieldData) ;

#ifdef USE_UNIFIEDMEM
#pragma acc update device(fx[numNode], \
                          fy[numNode], \
                          fz[numNode])
#else
#pragma acc update device(fx[numNode], \
                          fy[numNode], \
                          fz[numNode]) \
                          async
#endif
  } // end acc data
#endif
}

/******************************************/

/* a = F / m for every node.  Pure element-wise kernel; no communication. */
static inline
void CalcAccelerationForNodes(Real_t *xdd, Real_t *ydd, Real_t *zdd,
                              Real_t *fx, Real_t *fy, Real_t *fz,
                              Real_t *nodalMass, Index_t numNode)
{
   Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(fx[numNode], \
                                  fy[numNode], \
                                  fz[numNode], \
                                  xdd[numNode], \
                                  ydd[numNode], \
                                  zdd[numNode], \
                                  nodalMass[numNode]) async(0)
#else
#pragma acc parallel loop present(fx[numNode], \
                                  fy[numNode], \
                                  fz[numNode], \
                                  xdd[numNode], \
                                  ydd[numNode], \
                                  zdd[numNode], \
                                  nodalMass[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
   for (i = 0; i < numNode; ++i) {
      xdd[i] = fx[i] / nodalMass[i];
      ydd[i] = fy[i] / nodalMass[i];
      zdd[i] = fz[i] / nodalMass[i];
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/ static inline void ApplyAccelerationBoundaryConditionsForNodes( Real_t *xdd, Real_t *ydd, Real_t *zdd) { volatile Index_t numNode = m_numNode; volatile Index_t size = m_sizeX; Index_t numNodeBC = (size+1)*(size+1) ; Index_t *symmX = m_symmX; Index_t *symmY = m_symmY; Index_t *symmZ = m_symmZ; /* replace conditional loops with altered end conditions. This allows to do the equivalent of a nowait on the device too. */ Index_t endX = m_symmXempty ? 0 : numNodeBC; Index_t endY = m_symmYempty ? 0 : numNodeBC; Index_t endZ = m_symmZempty ? 0 : numNodeBC; if (!endX && !endY && !endZ) return; /* OPENARC BUG */ Index_t i; #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc parallel firstprivate(numNodeBC) \ present(xdd[numNode], \ ydd[numNode], \ zdd[numNode], \ symmX[numNodeBC], \ symmY[numNodeBC], \ symmZ[numNodeBC]) async(0) #else #pragma acc parallel firstprivate(numNodeBC) \ present(xdd[numNode], \ ydd[numNode], \ zdd[numNode], \ symmX[numNodeBC], \ symmY[numNodeBC], \ symmZ[numNodeBC]) #endif #else #pragma omp parallel firstprivate(numNodeBC) #endif { #ifdef _OPENACC #pragma acc loop #else #pragma omp for nowait #endif for(i=0 ; i<endX ; ++i) { xdd[symmX[i]] = (Real_t)(0.0) ; } #ifdef _OPENACC #pragma acc loop #else #pragma omp for nowait #endif for(i=0 ; i<endY ; ++i) { ydd[symmY[i]] = (Real_t)(0.0) ; } #ifdef _OPENACC #pragma acc loop #else #pragma omp for nowait #endif for(i=0 ; i<endZ ; ++i) { zdd[symmZ[i]] = (Real_t)(0.0) ; } } // end parallel region #ifdef USE_ASYNC #pragma acc wait(0) #endif } /******************************************/ static inline void CalcVelocityForNodes(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *xdd, Real_t *ydd, Real_t *zdd, const Real_t dt, const Real_t u_cut, Index_t numNode) { Index_t i; #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc parallel loop present(xd[numNode], \ yd[numNode], \ zd[numNode], \ xdd[numNode], \ ydd[numNode], \ zdd[numNode]) async(0) #else #pragma acc parallel loop 
present(xd[numNode], \ yd[numNode], \ zd[numNode], \ xdd[numNode], \ ydd[numNode], \ zdd[numNode]) #endif #else #pragma omp parallel for firstprivate(numNode) #endif for ( i = 0 ; i < numNode ; ++i ) { Real_t xdtmp, ydtmp, zdtmp ; xdtmp = xd[i] + xdd[i] * dt ; if( fabs(xdtmp) < u_cut ) xdtmp = (Real_t)(0.0); xd[i] = xdtmp ; ydtmp = yd[i] + ydd[i] * dt ; if( fabs(ydtmp) < u_cut ) ydtmp = (Real_t)(0.0); yd[i] = ydtmp ; zdtmp = zd[i] + zdd[i] * dt ; if( fabs(zdtmp) < u_cut ) zdtmp = (Real_t)(0.0); zd[i] = zdtmp ; } #ifdef USE_ASYNC #pragma acc wait(0) #endif } /******************************************/ static inline void CalcPositionForNodes(Real_t *x, Real_t *y, Real_t *z, Real_t *xd, Real_t *yd, Real_t *zd, const Real_t dt, Index_t numNode) { Index_t i; #ifdef _OPENACC #ifdef USE_ASYNC #else #pragma acc parallel loop present(x[numNode], \ y[numNode], \ z[numNode], \ xd[numNode], \ yd[numNode], \ zd[numNode]) #endif #else #pragma omp parallel for firstprivate(numNode) #endif for ( i = 0 ; i < numNode ; ++i ) { x[i] += xd[i] * dt ; y[i] += yd[i] * dt ; z[i] += zd[i] * dt ; } #ifdef USE_ASYNC #pragma acc wait(0) #endif } /******************************************/ static inline void LagrangeNodal() { #ifdef SEDOV_SYNC_POS_VEL_EARLY Real_t *fieldData[6] ; #endif const Real_t delt = m_deltatime ; Real_t u_cut = m_u_cut ; Index_t numNode = m_numNode; Index_t numElem = m_numElem; Real_t *fx = m_fx; Real_t *fy = m_fy; Real_t *fz = m_fz; Real_t *x = m_x; Real_t *y = m_y; Real_t *z = m_z; Real_t *xd = m_xd; Real_t *yd = m_yd; Real_t *zd = m_zd; Real_t *xdd = m_xdd; Real_t *ydd = m_ydd; Real_t *zdd = m_zdd; Real_t *nodalMass = m_nodalMass; /* time of boundary condition evaluation is beginning of step for force and * acceleration boundary conditions. 
   */
  CalcForceForNodes();

#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
  /* post receives before computing, so sends below can match immediately */
  CommRecv(MSG_SYNC_POS_VEL, 6, m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
           false, false) ;
#endif
#endif

  // redundant data region to allow for early acc updates before communication
#pragma acc data present(x[numNode], \
                         y[numNode], \
                         z[numNode], \
                         xd[numNode], \
                         yd[numNode], \
                         zd[numNode])
  {
#if USE_MPI
    /* used for async update */
    volatile int up = 1;

    /* wait for async device update in CalcForceForNodes to complete */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif
#endif

    CalcAccelerationForNodes(xdd, ydd, zdd,
                             fx, fy, fz, nodalMass, numNode);

    ApplyAccelerationBoundaryConditionsForNodes(xdd, ydd, zdd);

    CalcVelocityForNodes( xd, yd, zd,
                          xdd, ydd, zdd,
                          delt, u_cut, m_numNode) ;

#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
    /* start to update velocities asynchronously before the MPI comm */
#ifdef USE_UNIFIEDMEM
#pragma acc update host(xd[numNode], \
                        yd[numNode], \
                        zd[numNode])
#else
#pragma acc update host(xd[numNode], \
                        yd[numNode], \
                        zd[numNode]) \
                   async(up)
#endif
#endif
#endif

    CalcPositionForNodes( x, y, z, xd, yd, zd, delt, m_numNode );

#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
    /* pull the freshly computed positions back to the host for the exchange */
#ifdef USE_UNIFIEDMEM
#pragma acc update host(x[numNode], \
                        y[numNode], \
                        z[numNode])
#else
#pragma acc update host(x[numNode], \
                        y[numNode], \
                        z[numNode]) \
                   async(up)
#endif
#ifndef USE_UNIFIEDMEM
    /* both host updates (velocities and positions) must land before send */
#pragma acc wait(up)
#endif
    fieldData[0] = x ;
    fieldData[1] = y ;
    fieldData[2] = z ;
    fieldData[3] = xd ;
    fieldData[4] = yd ;
    fieldData[5] = zd ;

    CommSend(MSG_SYNC_POS_VEL, 6, fieldData,
             m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
             false, false) ;
    CommSyncPosVel() ;

    /* update device after CommRecv */
#ifdef USE_UNIFIEDMEM
#pragma acc update device(x[numNode], \
                          y[numNode], \
                          z[numNode], \
                          xd[numNode], \
                          yd[numNode], \
                          zd[numNode])
#else
#pragma acc update device(x[numNode], \
                          y[numNode], \
                          z[numNode], \
                          xd[numNode], \
                          yd[numNode], \
                          zd[numNode]) \
                   async
#endif
#endif
#endif
  } // end acc data

  return;
}
/******************************************/

/* Volume of a hexahedral element given its eight corner coordinates,
 * computed as a sum of three triple products of edge-difference vectors
 * scaled by 1/12 (standard LULESH hex-volume formula). */
static inline
Real_t CalcElemVolumeStatic( const Real_t x0, const Real_t x1,
               const Real_t x2, const Real_t x3,
               const Real_t x4, const Real_t x5,
               const Real_t x6, const Real_t x7,
               const Real_t y0, const Real_t y1,
               const Real_t y2, const Real_t y3,
               const Real_t y4, const Real_t y5,
               const Real_t y6, const Real_t y7,
               const Real_t z0, const Real_t z1,
               const Real_t z2, const Real_t z3,
               const Real_t z4, const Real_t z5,
               const Real_t z6, const Real_t z7 )
{
  Real_t twelveth = (Real_t)(1.0)/(Real_t)(12.0);

  /* diagonal/edge difference vectors between corner pairs */
  Real_t dx61 = x6 - x1;
  Real_t dy61 = y6 - y1;
  Real_t dz61 = z6 - z1;

  Real_t dx70 = x7 - x0;
  Real_t dy70 = y7 - y0;
  Real_t dz70 = z7 - z0;

  Real_t dx63 = x6 - x3;
  Real_t dy63 = y6 - y3;
  Real_t dz63 = z6 - z3;

  Real_t dx20 = x2 - x0;
  Real_t dy20 = y2 - y0;
  Real_t dz20 = z2 - z0;

  Real_t dx50 = x5 - x0;
  Real_t dy50 = y5 - y0;
  Real_t dz50 = z5 - z0;

  Real_t dx64 = x6 - x4;
  Real_t dy64 = y6 - y4;
  Real_t dz64 = z6 - z4;

  Real_t dx31 = x3 - x1;
  Real_t dy31 = y3 - y1;
  Real_t dz31 = z3 - z1;

  Real_t dx72 = x7 - x2;
  Real_t dy72 = y7 - y2;
  Real_t dz72 = z7 - z2;

  Real_t dx43 = x4 - x3;
  Real_t dy43 = y4 - y3;
  Real_t dz43 = z4 - z3;

  Real_t dx57 = x5 - x7;
  Real_t dy57 = y5 - y7;
  Real_t dz57 = z5 - z7;

  Real_t dx14 = x1 - x4;
  Real_t dy14 = y1 - y4;
  Real_t dz14 = z1 - z4;

  Real_t dx25 = x2 - x5;
  Real_t dy25 = y2 - y5;
  Real_t dz25 = z2 - z5;

/* scalar triple product (v1 . (v2 x v3)) of three difference vectors */
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
   ((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))

  Real_t volume =
    TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
       dy31 + dy72, dy63, dy20,
       dz31 + dz72, dz63, dz20) +
    TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
       dy43 + dy57, dy64, dy70,
       dz43 + dz57, dz64, dz70) +
    TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
       dy14 + dy25, dy61, dy50,
       dz14 + dz25, dz61, dz50);

#undef TRIPLE_PRODUCT

  volume *= twelveth;

  return volume ;
}

/* defined again outside because you can not define macros within macros */
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, \
z2, x3, y3, z3) \ ((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2))) //static inline #define CalcElemVolume_Full(x0, x1, \ x2, x3, \ x4, x5, \ x6, x7, \ y0, y1, \ y2, y3, \ y4, y5, \ y6, y7, \ z0, z1, \ z2, z3, \ z4, z5, \ z6, z7) \ do { \ Real_t twelveth = (Real_t)(1.0)/(Real_t)(12.0); \ \ Real_t dx61 = (x6) - (x1); \ Real_t dy61 = (y6) - (y1); \ Real_t dz61 = (z6) - (z1); \ \ Real_t dx70 = (x7) - (x0); \ Real_t dy70 = (y7) - (y0); \ Real_t dz70 = (z7) - (z0); \ \ Real_t dx63 = (x6) - (x3); \ Real_t dy63 = (y6) - (y3); \ Real_t dz63 = (z6) - (z3); \ \ Real_t dx20 = (x2) - (x0); \ Real_t dy20 = (y2) - (y0); \ Real_t dz20 = (z2) - (z0); \ \ Real_t dx50 = (x5) - (x0); \ Real_t dy50 = (y5) - (y0); \ Real_t dz50 = (z5) - (z0); \ \ Real_t dx64 = (x6) - (x4); \ Real_t dy64 = (y6) - (y4); \ Real_t dz64 = (z6) - (z4); \ \ Real_t dx31 = (x3) - (x1); \ Real_t dy31 = (y3) - (y1); \ Real_t dz31 = (z3) - (z1); \ \ Real_t dx72 = (x7) - (x2); \ Real_t dy72 = (y7) - (y2); \ Real_t dz72 = (z7) - (z2); \ \ Real_t dx43 = (x4) - (x3); \ Real_t dy43 = (y4) - (y3); \ Real_t dz43 = (z4) - (z3); \ \ Real_t dx57 = (x5) - (x7); \ Real_t dy57 = (y5) - (y7); \ Real_t dz57 = (z5) - (z7); \ \ Real_t dx14 = (x1) - (x4); \ Real_t dy14 = (y1) - (y4); \ Real_t dz14 = (z1) - (z4); \ \ Real_t dx25 = (x2) - (x5); \ Real_t dy25 = (y2) - (y5); \ Real_t dz25 = (z2) - (z5); \ \ volume = \ TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20, \ dy31 + dy72, dy63, dy20, \ dz31 + dz72, dz63, dz20) + \ TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70, \ dy43 + dy57, dy64, dy70, \ dz43 + dz57, dz64, dz70) + \ TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50, \ dy14 + dy25, dy61, dy50, \ dz14 + dz25, dz61, dz50); \ \ volume *= twelveth; \ } while(0) /******************************************/ //inline Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] ) { return CalcElemVolumeStatic( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], y[0], y[1], y[2], y[3], y[4], y[5], y[6], 
y[7], z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]); } //static inline #define CalcElemVolume_macro(x,y,z) \ do { \ CalcElemVolume_Full((x.v0), (x.v1), (x.v2), (x.v3), (x.v4), (x.v5), (x.v6), (x.v7), \ (y.v0), (y.v1), (y.v2), (y.v3), (y.v4), (y.v5), (y.v6), (y.v7), \ (z.v0), (z.v1), (z.v2), (z.v3), (z.v4), (z.v5), (z.v6), (z.v7)); \ } while(0) /******************************************/ //static inline #define AreaFace_macro(x0, x1, \ x2, x3, \ y0, y1, \ y2, y3, \ z0, z1, \ z2, z3) \ do { \ Real_t fx = (x2 - x0) - (x3 - x1); \ Real_t fy = (y2 - y0) - (y3 - y1); \ Real_t fz = (z2 - z0) - (z3 - z1); \ Real_t gx = (x2 - x0) + (x3 - x1); \ Real_t gy = (y2 - y0) + (y3 - y1); \ Real_t gz = (z2 - z0) + (z3 - z1); \ a = \ (fx * fx + fy * fy + fz * fz) * \ (gx * gx + gy * gy + gz * gz) - \ (fx * gx + fy * gy + fz * gz) * \ (fx * gx + fy * gy + fz * gz); \ } while(0) /******************************************/ //static inline #define CalcElemCharacteristicLength_macro(x, \ y, \ z, \ volume) \ do { \ charLength = (Real_t)(0.0); \ Real_t a; \ \ AreaFace_macro(x.v0,x.v1,x.v2,x.v3, \ y.v0,y.v1,y.v2,y.v3, \ z.v0,z.v1,z.v2,z.v3) ; \ charLength = MAX(a,charLength) ; \ \ AreaFace_macro(x.v4,x.v5,x.v6,x.v7, \ y.v4,y.v5,y.v6,y.v7, \ z.v4,z.v5,z.v6,z.v7) ; \ charLength = MAX(a,charLength) ; \ \ AreaFace_macro(x.v0,x.v1,x.v5,x.v4, \ y.v0,y.v1,y.v5,y.v4, \ z.v0,z.v1,z.v5,z.v4) ; \ charLength = MAX(a,charLength) ; \ \ AreaFace_macro(x.v1,x.v2,x.v6,x.v5, \ y.v1,y.v2,y.v6,y.v5, \ z.v1,z.v2,z.v6,z.v5) ; \ charLength = MAX(a,charLength) ; \ \ AreaFace_macro(x.v2,x.v3,x.v7,x.v6, \ y.v2,y.v3,y.v7,y.v6, \ z.v2,z.v3,z.v7,z.v6) ; \ charLength = MAX(a,charLength) ; \ \ AreaFace_macro(x.v3,x.v0,x.v4,x.v7, \ y.v3,y.v0,y.v4,y.v7, \ z.v3,z.v0,z.v4,z.v7) ; \ charLength = MAX(a,charLength) ; \ \ charLength = (Real_t)(4.0) * volume / sqrt(charLength); \ } while(0) /******************************************/ //static inline #define CalcElemVelocityGradient_macro(xvel, \ yvel, \ zvel, \ b, \ detJ, \ d) 
\ do { \ const Real_t inv_detJ = (Real_t)(1.0) / detJ ; \ Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz; \ \ d.v0 = inv_detJ * ( b.v0_0 * (xvel.v0-xvel.v6) \ + b.v0_1 * (xvel.v1-xvel.v7) \ + b.v0_2 * (xvel.v2-xvel.v4) \ + b.v0_3 * (xvel.v3-xvel.v5) ); \ \ d.v1 = inv_detJ * ( b.v1_0 * (yvel.v0-yvel.v6) \ + b.v1_1 * (yvel.v1-yvel.v7) \ + b.v1_2 * (yvel.v2-yvel.v4) \ + b.v1_3 * (yvel.v3-yvel.v5) ); \ \ d.v2 = inv_detJ * ( b.v2_0 * (zvel.v0-zvel.v6) \ + b.v2_1 * (zvel.v1-zvel.v7) \ + b.v2_2 * (zvel.v2-zvel.v4) \ + b.v2_3 * (zvel.v3-zvel.v5) ); \ \ dyddx = inv_detJ * ( b.v0_0 * (yvel.v0-yvel.v6) \ + b.v0_1 * (yvel.v1-yvel.v7) \ + b.v0_2 * (yvel.v2-yvel.v4) \ + b.v0_3 * (yvel.v3-yvel.v5) ); \ \ dxddy = inv_detJ * ( b.v1_0 * (xvel.v0-xvel.v6) \ + b.v1_1 * (xvel.v1-xvel.v7) \ + b.v1_2 * (xvel.v2-xvel.v4) \ + b.v1_3 * (xvel.v3-xvel.v5) ); \ \ dzddx = inv_detJ * ( b.v0_0 * (zvel.v0-zvel.v6) \ + b.v0_1 * (zvel.v1-zvel.v7) \ + b.v0_2 * (zvel.v2-zvel.v4) \ + b.v0_3 * (zvel.v3-zvel.v5) ); \ \ dxddz = inv_detJ * ( b.v2_0 * (xvel.v0-xvel.v6) \ + b.v2_1 * (xvel.v1-xvel.v7) \ + b.v2_2 * (xvel.v2-xvel.v4) \ + b.v2_3 * (xvel.v3-xvel.v5) ); \ \ dzddy = inv_detJ * ( b.v1_0 * (zvel.v0-zvel.v6) \ + b.v1_1 * (zvel.v1-zvel.v7) \ + b.v1_2 * (zvel.v2-zvel.v4) \ + b.v1_3 * (zvel.v3-zvel.v5) ); \ \ dyddz = inv_detJ * ( b.v2_0 * (yvel.v0-yvel.v6) \ + b.v2_1 * (yvel.v1-yvel.v7) \ + b.v2_2 * (yvel.v2-yvel.v4) \ + b.v2_3 * (yvel.v3-yvel.v5) ); \ d.v5 = (Real_t)( .5) * ( dxddy + dyddx ); \ d.v4 = (Real_t)( .5) * ( dxddz + dzddx ); \ d.v3 = (Real_t)( .5) * ( dzddy + dyddz ); \ } while(0) /******************************************/ //static inline void CalcKinematicsForElems( Index_t *nodelist, Real_t *x, Real_t *y, Real_t *z, Real_t *xd, Real_t *yd, Real_t *zd, Real_t *dxx, Real_t *dyy, Real_t *dzz, Real_t *v, Real_t *volo, Real_t *vnew, Real_t *delv, Real_t *arealg, Real_t deltaTime, Index_t numElem, Index_t numNode) { volatile Index_t numElem8 = numElem * 8; Index_t k; // loop over all elements 
#ifdef _OPENACC #ifdef USE_ASYNC #pragma acc parallel loop present(dxx[numElem], \ dyy[numElem], \ dzz[numElem], \ x[numNode], \ y[numNode], \ z[numNode], \ xd[numNode], \ yd[numNode], \ zd[numNode], \ v[numElem], \ volo[numElem], \ vnew[numElem], \ delv[numElem], \ arealg[numElem], \ nodelist[numElem8]) async(0) #else #pragma acc parallel loop present(dxx[numElem], \ dyy[numElem], \ dzz[numElem], \ x[numNode], \ y[numNode], \ z[numNode], \ xd[numNode], \ yd[numNode], \ zd[numNode], \ v[numElem], \ volo[numElem], \ vnew[numElem], \ delv[numElem], \ arealg[numElem], \ nodelist[numElem8]) #endif #else #pragma omp parallel for firstprivate(numElem, deltaTime) #endif for( k=0 ; k<numElem ; ++k ) { bmat B ; /** shape function derivatives */ val6 D ; val8 x_local ; val8 y_local ; val8 z_local ; val8 xd_local ; val8 yd_local ; val8 zd_local ; Real_t detJ = (Real_t)(0.0) ; Real_t volume ; Real_t relativeVolume ; const Index_t* const elemToNode = &nodelist[8*k] ; // get nodal coordinates from global arrays and copy into local arrays. // Loop unrolled because the PGI OpenACC implementation currently stores // locally-defined arrays in a global, shared context. Thus we have to use // scalars instead to get them in registers. 
Index_t gnode; gnode = elemToNode[0]; x_local.v0 = x[gnode]; y_local.v0 = y[gnode]; z_local.v0 = z[gnode]; gnode = elemToNode[1]; x_local.v1 = x[gnode]; y_local.v1 = y[gnode]; z_local.v1 = z[gnode]; gnode = elemToNode[2]; x_local.v2 = x[gnode]; y_local.v2 = y[gnode]; z_local.v2 = z[gnode]; gnode = elemToNode[3]; x_local.v3 = x[gnode]; y_local.v3 = y[gnode]; z_local.v3 = z[gnode]; gnode = elemToNode[4]; x_local.v4 = x[gnode]; y_local.v4 = y[gnode]; z_local.v4 = z[gnode]; gnode = elemToNode[5]; x_local.v5 = x[gnode]; y_local.v5 = y[gnode]; z_local.v5 = z[gnode]; gnode = elemToNode[6]; x_local.v6 = x[gnode]; y_local.v6 = y[gnode]; z_local.v6 = z[gnode]; gnode = elemToNode[7]; x_local.v7 = x[gnode]; y_local.v7 = y[gnode]; z_local.v7 = z[gnode]; // volume calculations - CalcElemVolume is a macro that sets volume CalcElemVolume_macro(x_local, y_local, z_local ); relativeVolume = volume / volo[k] ; vnew[k] = relativeVolume ; delv[k] = relativeVolume - v[k] ; // set characteristic length Real_t charLength; CalcElemCharacteristicLength_macro(x_local, y_local, z_local, volume); arealg[k] = charLength; // get nodal velocities from global array and copy into local arrays. 
gnode = elemToNode[0]; xd_local.v0 = xd[gnode]; yd_local.v0 = yd[gnode]; zd_local.v0 = zd[gnode]; gnode = elemToNode[1]; xd_local.v1 = xd[gnode]; yd_local.v1 = yd[gnode]; zd_local.v1 = zd[gnode]; gnode = elemToNode[2]; xd_local.v2 = xd[gnode]; yd_local.v2 = yd[gnode]; zd_local.v2 = zd[gnode]; gnode = elemToNode[3]; xd_local.v3 = xd[gnode]; yd_local.v3 = yd[gnode]; zd_local.v3 = zd[gnode]; gnode = elemToNode[4]; xd_local.v4 = xd[gnode]; yd_local.v4 = yd[gnode]; zd_local.v4 = zd[gnode]; gnode = elemToNode[5]; xd_local.v5 = xd[gnode]; yd_local.v5 = yd[gnode]; zd_local.v5 = zd[gnode]; gnode = elemToNode[6]; xd_local.v6 = xd[gnode]; yd_local.v6 = yd[gnode]; zd_local.v6 = zd[gnode]; gnode = elemToNode[7]; xd_local.v7 = xd[gnode]; yd_local.v7 = yd[gnode]; zd_local.v7 = zd[gnode]; Real_t dt2 = (Real_t)(0.5) * deltaTime; x_local.v0 -= dt2 * xd_local.v0; y_local.v0 -= dt2 * yd_local.v0; z_local.v0 -= dt2 * zd_local.v0; x_local.v1 -= dt2 * xd_local.v1; y_local.v1 -= dt2 * yd_local.v1; z_local.v1 -= dt2 * zd_local.v1; x_local.v2 -= dt2 * xd_local.v2; y_local.v2 -= dt2 * yd_local.v2; z_local.v2 -= dt2 * zd_local.v2; x_local.v3 -= dt2 * xd_local.v3; y_local.v3 -= dt2 * yd_local.v3; z_local.v3 -= dt2 * zd_local.v3; x_local.v4 -= dt2 * xd_local.v4; y_local.v4 -= dt2 * yd_local.v4; z_local.v4 -= dt2 * zd_local.v4; x_local.v5 -= dt2 * xd_local.v5; y_local.v5 -= dt2 * yd_local.v5; z_local.v5 -= dt2 * zd_local.v5; x_local.v6 -= dt2 * xd_local.v6; y_local.v6 -= dt2 * yd_local.v6; z_local.v6 -= dt2 * zd_local.v6; x_local.v7 -= dt2 * xd_local.v7; y_local.v7 -= dt2 * yd_local.v7; z_local.v7 -= dt2 * zd_local.v7; CalcElemShapeFunctionDerivatives_unrolled( x_local, y_local, z_local, B, detJ ); CalcElemVelocityGradient_macro( xd_local, yd_local, zd_local, B, detJ, D ); // put velocity gradient quantities into their global arrays. 
dxx[k] = D.v0; dyy[k] = D.v1; dzz[k] = D.v2; } #ifdef USE_ASYNC #pragma acc wait(0) #endif } /******************************************/ static inline void CalcLagrangeElements(Real_t* vnew) { Index_t numElem = m_numElem ; Index_t numNode = m_numNode ; if (numElem > 0) { const Real_t deltatime = m_deltatime ; // strains are now allocated at startup to prevent unnecessary mem transfers Real_t *dxx = m_dxx; Real_t *dyy = m_dyy; Real_t *dzz = m_dzz; Real_t *x = m_x; Real_t *y = m_y; Real_t *z = m_z; Real_t *xd = m_xd; Real_t *yd = m_yd; Real_t *zd = m_zd; Real_t *v = m_v; Real_t *volo = m_volo; Real_t *vdov = m_vdov; Real_t *delv = m_delv; Real_t *arealg = m_arealg; Index_t *nodelist = m_nodelist; CalcKinematicsForElems(nodelist, x, y, z, xd, yd, zd, dxx, dyy, dzz, v, volo, vnew, delv, arealg, deltatime, numElem, numNode); // element loop to do some stuff not included in the elemlib function. int abort = 0; Index_t k; #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc parallel loop present(vdov[numElem], \ dxx[numElem], \ dyy[numElem], \ dzz[numElem], \ vnew[numElem]) \ reduction(max: abort) async(0) #else #pragma acc parallel loop present(vdov[numElem], \ dxx[numElem], \ dyy[numElem], \ dzz[numElem], \ vnew[numElem]) \ reduction(max: abort) #endif #else #pragma omp parallel for firstprivate(numElem) reduction(max: abort) #endif for ( k=0 ; k<numElem ; ++k ) { // calc strain rate and apply as constraint (only done in FB element) Real_t vdov_k = dxx[k] + dyy[k] + dzz[k] ; Real_t vdovthird = vdov_k/(Real_t)(3.0) ; // make the rate of deformation tensor deviatoric vdov[k] = vdov_k ; dxx[k] -= vdovthird ; dyy[k] -= vdovthird ; dzz[k] -= vdovthird ; // See if any volumes are negative, and take appropriate action. 
if (vnew[k] <= (Real_t)(0.0)) { abort = 1; } } #ifdef USE_ASYNC #pragma acc wait(0) #endif if(abort) { #if USE_MPI MPI_Abort(MPI_COMM_WORLD, VolumeError) ; #else exit(VolumeError); #endif } } // end if numElem > 0 } /******************************************/ static inline void CalcMonotonicQGradientsForElems(Real_t vnew[], Index_t allElem) { volatile Index_t numNode = m_numNode; Index_t numElem = m_numElem; volatile Int_t numElem8 = m_numElem * 8; Real_t *x = m_x; Real_t *y = m_y; Real_t *z = m_z; Real_t *xd = m_xd; Real_t *yd = m_yd; Real_t *zd = m_zd; Real_t *volo = m_volo; Index_t *nodelist = m_nodelist; Real_t *delv_xi = m_delv_xi; Real_t *delv_eta = m_delv_eta; Real_t *delv_zeta = m_delv_zeta; Real_t *delx_xi = m_delx_xi; Real_t *delx_eta = m_delx_eta; Real_t *delx_zeta = m_delx_zeta; Index_t i; #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc parallel loop present(vnew[numElem], \ nodelist[numElem8], \ x[numNode], \ y[numNode], \ z[numNode], \ xd[numNode], \ yd[numNode], \ zd[numNode], \ volo[numElem], \ delx_xi[allElem], \ delx_eta[allElem], \ delx_zeta[allElem], \ delv_xi[allElem], \ delv_eta[allElem], \ delv_zeta[allElem]) async(0) #else #pragma acc parallel loop present(vnew[numElem], \ nodelist[numElem8], \ x[numNode], \ y[numNode], \ z[numNode], \ xd[numNode], \ yd[numNode], \ zd[numNode], \ volo[numElem], \ delx_xi[allElem], \ delx_eta[allElem], \ delx_zeta[allElem], \ delv_xi[allElem], \ delv_eta[allElem], \ delv_zeta[allElem]) #endif #else #pragma omp parallel for firstprivate(numElem) #endif for (i = 0 ; i < numElem ; ++i ) { const Real_t ptiny = (Real_t)(1.e-36) ; Real_t ax,ay,az ; Real_t dxv,dyv,dzv ; const Index_t *elemToNode = &nodelist[i*8]; Index_t n0 = elemToNode[0] ; Index_t n1 = elemToNode[1] ; Index_t n2 = elemToNode[2] ; Index_t n3 = elemToNode[3] ; Index_t n4 = elemToNode[4] ; Index_t n5 = elemToNode[5] ; Index_t n6 = elemToNode[6] ; Index_t n7 = elemToNode[7] ; Real_t x0 = x[n0] ; Real_t x1 = x[n1] ; Real_t x2 = x[n2] ; Real_t x3 = x[n3] 
; Real_t x4 = x[n4] ; Real_t x5 = x[n5] ; Real_t x6 = x[n6] ; Real_t x7 = x[n7] ; Real_t y0 = y[n0] ; Real_t y1 = y[n1] ; Real_t y2 = y[n2] ; Real_t y3 = y[n3] ; Real_t y4 = y[n4] ; Real_t y5 = y[n5] ; Real_t y6 = y[n6] ; Real_t y7 = y[n7] ; Real_t z0 = z[n0] ; Real_t z1 = z[n1] ; Real_t z2 = z[n2] ; Real_t z3 = z[n3] ; Real_t z4 = z[n4] ; Real_t z5 = z[n5] ; Real_t z6 = z[n6] ; Real_t z7 = z[n7] ; Real_t xv0 = xd[n0] ; Real_t xv1 = xd[n1] ; Real_t xv2 = xd[n2] ; Real_t xv3 = xd[n3] ; Real_t xv4 = xd[n4] ; Real_t xv5 = xd[n5] ; Real_t xv6 = xd[n6] ; Real_t xv7 = xd[n7] ; Real_t yv0 = yd[n0] ; Real_t yv1 = yd[n1] ; Real_t yv2 = yd[n2] ; Real_t yv3 = yd[n3] ; Real_t yv4 = yd[n4] ; Real_t yv5 = yd[n5] ; Real_t yv6 = yd[n6] ; Real_t yv7 = yd[n7] ; Real_t zv0 = zd[n0] ; Real_t zv1 = zd[n1] ; Real_t zv2 = zd[n2] ; Real_t zv3 = zd[n3] ; Real_t zv4 = zd[n4] ; Real_t zv5 = zd[n5] ; Real_t zv6 = zd[n6] ; Real_t zv7 = zd[n7] ; Real_t vol = volo[i]*vnew[i] ; Real_t norm = (Real_t)(1.0) / ( vol + ptiny ) ; Real_t dxj = (Real_t)(-0.25)*((x0+x1+x5+x4) - (x3+x2+x6+x7)) ; Real_t dyj = (Real_t)(-0.25)*((y0+y1+y5+y4) - (y3+y2+y6+y7)) ; Real_t dzj = (Real_t)(-0.25)*((z0+z1+z5+z4) - (z3+z2+z6+z7)) ; Real_t dxi = (Real_t)( 0.25)*((x1+x2+x6+x5) - (x0+x3+x7+x4)) ; Real_t dyi = (Real_t)( 0.25)*((y1+y2+y6+y5) - (y0+y3+y7+y4)) ; Real_t dzi = (Real_t)( 0.25)*((z1+z2+z6+z5) - (z0+z3+z7+z4)) ; Real_t dxk = (Real_t)( 0.25)*((x4+x5+x6+x7) - (x0+x1+x2+x3)) ; Real_t dyk = (Real_t)( 0.25)*((y4+y5+y6+y7) - (y0+y1+y2+y3)) ; Real_t dzk = (Real_t)( 0.25)*((z4+z5+z6+z7) - (z0+z1+z2+z3)) ; /* find delvk and delxk ( i cross j ) */ ax = dyi*dzj - dzi*dyj ; ay = dzi*dxj - dxi*dzj ; az = dxi*dyj - dyi*dxj ; delx_zeta[i] = vol / sqrt(ax*ax + ay*ay + az*az + ptiny) ; ax *= norm ; ay *= norm ; az *= norm ; dxv = (Real_t)(0.25)*((xv4+xv5+xv6+xv7) - (xv0+xv1+xv2+xv3)) ; dyv = (Real_t)(0.25)*((yv4+yv5+yv6+yv7) - (yv0+yv1+yv2+yv3)) ; dzv = (Real_t)(0.25)*((zv4+zv5+zv6+zv7) - (zv0+zv1+zv2+zv3)) ; delv_zeta[i] = 
ax*dxv + ay*dyv + az*dzv ; /* find delxi and delvi ( j cross k ) */ ax = dyj*dzk - dzj*dyk ; ay = dzj*dxk - dxj*dzk ; az = dxj*dyk - dyj*dxk ; delx_xi[i] = vol / sqrt(ax*ax + ay*ay + az*az + ptiny) ; ax *= norm ; ay *= norm ; az *= norm ; dxv = (Real_t)(0.25)*((xv1+xv2+xv6+xv5) - (xv0+xv3+xv7+xv4)) ; dyv = (Real_t)(0.25)*((yv1+yv2+yv6+yv5) - (yv0+yv3+yv7+yv4)) ; dzv = (Real_t)(0.25)*((zv1+zv2+zv6+zv5) - (zv0+zv3+zv7+zv4)) ; delv_xi[i] = ax*dxv + ay*dyv + az*dzv ; /* find delxj and delvj ( k cross i ) */ ax = dyk*dzi - dzk*dyi ; ay = dzk*dxi - dxk*dzi ; az = dxk*dyi - dyk*dxi ; delx_eta[i] = vol / sqrt(ax*ax + ay*ay + az*az + ptiny) ; ax *= norm ; ay *= norm ; az *= norm ; dxv = (Real_t)(-0.25)*((xv0+xv1+xv5+xv4) - (xv3+xv2+xv6+xv7)) ; dyv = (Real_t)(-0.25)*((yv0+yv1+yv5+yv4) - (yv3+yv2+yv6+yv7)) ; dzv = (Real_t)(-0.25)*((zv0+zv1+zv5+zv4) - (zv3+zv2+zv6+zv7)) ; delv_eta[i] = ax*dxv + ay*dyv + az*dzv ; } #ifdef USE_ASYNC #pragma acc wait(0) #endif } /******************************************/ /* * NOTES: This function uses several goto statements. They are used in the * place of breaks. This is the result of a bug in the PGI compiler (version * (13.4-accelerator) in which breaks inside of switches jump out of the omp * loops they are placed in. We decided that using gotos is a more readable * alternative than rewriting them all to if-else blocks. 
*/ static inline void CalcMonotonicQRegionForElems(Int_t r, Real_t vnew[], Real_t ptiny, Index_t allElem) { Real_t monoq_limiter_mult = m_monoq_limiter_mult; Real_t monoq_max_slope = m_monoq_max_slope; Real_t qlc_monoq = m_qlc_monoq; Real_t qqc_monoq = m_qqc_monoq; Index_t *lxim = m_lxim; Index_t *lxip = m_lxip; Index_t *letam = m_letam; Index_t *letap = m_letap; Index_t *lzetam = m_lzetam; Index_t *lzetap = m_lzetap; Real_t *delv_xi = m_delv_xi; Real_t *delv_eta = m_delv_eta; Real_t *delv_zeta = m_delv_zeta; Real_t *delx_xi = m_delx_xi; Real_t *delx_eta = m_delx_eta; Real_t *delx_zeta = m_delx_zeta; Real_t *qq = m_qq; Real_t *ql = m_ql; Real_t *elemMass = m_elemMass; Real_t *volo = m_volo; Real_t *vdov = m_vdov; Index_t regElemSize = m_regElemSize[r]; Index_t *regElemlist = m_regElemlist[r]; volatile Index_t numElem = m_numElem; Int_t *elemBC = m_elemBC; Index_t ielem; #ifdef _OPENACC #ifdef USE_ASYNC #pragma acc parallel loop firstprivate(qlc_monoq, qqc_monoq, \ monoq_limiter_mult, monoq_max_slope, \ ptiny) \ copyin(regElemlist[regElemSize]) \ present(vnew[numElem], \ vdov[numElem], \ delx_xi[allElem], \ delx_eta[allElem], \ delx_zeta[allElem], \ delv_xi[allElem], \ delv_eta[allElem], \ delv_zeta[allElem], \ elemMass[numElem], \ volo[numElem], \ lxip[numElem], \ lxim[numElem], \ letam[numElem], \ letap[numElem], \ lzetam[numElem], \ lzetap[numElem], \ ql[numElem], \ qq[numElem], \ elemBC[numElem]) async(0) #else #pragma acc parallel loop firstprivate(qlc_monoq, qqc_monoq, \ monoq_limiter_mult, monoq_max_slope, \ ptiny) \ copyin(regElemlist[regElemSize]) \ present(vnew[numElem], \ vdov[numElem], \ delx_xi[allElem], \ delx_eta[allElem], \ delx_zeta[allElem], \ delv_xi[allElem], \ delv_eta[allElem], \ delv_zeta[allElem], \ elemMass[numElem], \ volo[numElem], \ lxip[numElem], \ lxim[numElem], \ letam[numElem], \ letap[numElem], \ lzetam[numElem], \ lzetap[numElem], \ ql[numElem], \ qq[numElem], \ elemBC[numElem]) #endif #else #pragma omp parallel for 
firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny) #endif for ( ielem = 0 ; ielem < regElemSize; ++ielem ) { Index_t i = regElemlist[ielem]; Real_t qlin, qquad ; Real_t phixi, phieta, phizeta ; Int_t bcMask = elemBC[i]; Real_t delvm, delvp ; /* phixi */ Real_t norm = (Real_t)(1.) / (delv_xi[i]+ ptiny ) ; switch (bcMask & XI_M) { case XI_M_COMM: /* needs comm data */ case 0: delvm = delv_xi[lxim[i]]; goto BCMASK_AND_XI_M; case XI_M_SYMM: delvm = delv_xi[i] ; goto BCMASK_AND_XI_M; case XI_M_FREE: delvm = (Real_t)(0.0) ; goto BCMASK_AND_XI_M; default: /* ERROR */ ; goto BCMASK_AND_XI_M; } BCMASK_AND_XI_M: switch (bcMask & XI_P) { case XI_P_COMM: /* needs comm data */ case 0: delvp = delv_xi[lxip[i]] ; goto BCMASK_AND_XI_P; case XI_P_SYMM: delvp = delv_xi[i] ; goto BCMASK_AND_XI_P; case XI_P_FREE: delvp = (Real_t)(0.0) ; goto BCMASK_AND_XI_P; default: /* ERROR */ ; goto BCMASK_AND_XI_P; } BCMASK_AND_XI_P: delvm = delvm * norm ; delvp = delvp * norm ; phixi = (Real_t)(.5) * ( delvm + delvp ) ; delvm *= monoq_limiter_mult ; delvp *= monoq_limiter_mult ; if ( delvm < phixi ) phixi = delvm ; if ( delvp < phixi ) phixi = delvp ; if ( phixi < (Real_t)(0.)) phixi = (Real_t)(0.) ; if ( phixi > monoq_max_slope) phixi = monoq_max_slope; /* phieta */ norm = (Real_t)(1.) 
/ ( delv_eta[i] + ptiny ) ; switch (bcMask & ETA_M) { case ETA_M_COMM: /* needs comm data */ case 0: delvm = delv_eta[letam[i]] ; goto BCMASK_AND_ETA_M; case ETA_M_SYMM: delvm = delv_eta[i] ; goto BCMASK_AND_ETA_M; case ETA_M_FREE: delvm = (Real_t)(0.0) ; goto BCMASK_AND_ETA_M; default: /* ERROR */ ; goto BCMASK_AND_ETA_M; } BCMASK_AND_ETA_M: switch (bcMask & ETA_P) { case ETA_P_COMM: /* needs comm data */ case 0: delvp = delv_eta[letap[i]] ; goto BCMASK_AND_ETA_P; case ETA_P_SYMM: delvp = delv_eta[i] ; goto BCMASK_AND_ETA_P; case ETA_P_FREE: delvp = (Real_t)(0.0) ; goto BCMASK_AND_ETA_P; default: /* ERROR */ ; goto BCMASK_AND_ETA_P; } BCMASK_AND_ETA_P: delvm = delvm * norm ; delvp = delvp * norm ; phieta = (Real_t)(.5) * ( delvm + delvp ) ; delvm *= monoq_limiter_mult ; delvp *= monoq_limiter_mult ; if ( delvm < phieta ) phieta = delvm ; if ( delvp < phieta ) phieta = delvp ; if ( phieta < (Real_t)(0.)) phieta = (Real_t)(0.) ; if ( phieta > monoq_max_slope) phieta = monoq_max_slope; /* phizeta */ norm = (Real_t)(1.) 
/ ( delv_zeta[i] + ptiny ) ; switch (bcMask & ZETA_M) { case ZETA_M_COMM: /* needs comm data */ case 0: delvm = delv_zeta[lzetam[i]] ; goto BCMASK_AND_ZETA_M; case ZETA_M_SYMM: delvm = delv_zeta[i] ; goto BCMASK_AND_ZETA_M; case ZETA_M_FREE: delvm = (Real_t)(0.0) ; goto BCMASK_AND_ZETA_M; default: /* ERROR */ ; goto BCMASK_AND_ZETA_M; } BCMASK_AND_ZETA_M: switch (bcMask & ZETA_P) { case ZETA_P_COMM: /* needs comm data */ case 0: delvp = delv_zeta[lzetap[i]] ; goto BCMASK_AND_ZETA_P; case ZETA_P_SYMM: delvp = delv_zeta[i] ; goto BCMASK_AND_ZETA_P; case ZETA_P_FREE: delvp = (Real_t)(0.0) ; goto BCMASK_AND_ZETA_P; default: /* ERROR */ ; goto BCMASK_AND_ZETA_P; } BCMASK_AND_ZETA_P: delvm = delvm * norm ; delvp = delvp * norm ; phizeta = (Real_t)(.5) * ( delvm + delvp ) ; delvm *= monoq_limiter_mult ; delvp *= monoq_limiter_mult ; if ( delvm < phizeta ) phizeta = delvm ; if ( delvp < phizeta ) phizeta = delvp ; if ( phizeta < (Real_t)(0.)) phizeta = (Real_t)(0.); if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope; /* Remove length scale */ if ( vdov[i] > (Real_t)(0.) ) { qlin = (Real_t)(0.) ; qquad = (Real_t)(0.) ; } else { Real_t delvxxi = delv_xi[i] * delx_xi[i] ; Real_t delvxeta = delv_eta[i] * delx_eta[i] ; Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ; if ( delvxxi > (Real_t)(0.) ) delvxxi = (Real_t)(0.) ; if ( delvxeta > (Real_t)(0.) ) delvxeta = (Real_t)(0.) ; if ( delvxzeta > (Real_t)(0.) ) delvxzeta = (Real_t)(0.) ; Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ; qlin = -qlc_monoq * rho * ( delvxxi * ((Real_t)(1.) - phixi) + delvxeta * ((Real_t)(1.) - phieta) + delvxzeta * ((Real_t)(1.) - phizeta) ) ; qquad = qqc_monoq * rho * ( delvxxi*delvxxi * ((Real_t)(1.) - phixi*phixi) + delvxeta*delvxeta * ((Real_t)(1.) - phieta*phieta) + delvxzeta*delvxzeta * ((Real_t)(1.) 
                   - phizeta*phizeta) ) ;
    }

    /* store the quadratic and linear artificial viscosity terms */
    qq[i] = qquad ;
    ql[i] = qlin  ;
  }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}

/******************************************/

/* Apply the monotonic Q computation to every material region that
 * contains elements. */
static inline
void CalcMonotonicQForElems(Real_t vnew[], Index_t allElem)
{
  //
  // initialize parameters
  //
  const Real_t ptiny = (Real_t)(1.e-36) ;
  Index_t r;

  //
  // calculate the monotonic q for all regions
  //
  for (r=0 ; r<m_numReg ; ++r) {
    if (m_regElemSize[r] > 0) {
      CalcMonotonicQRegionForElems(r, vnew, ptiny, allElem);
    }
  }
}

/******************************************/

/* Driver for the artificial viscosity (Q) phase: compute velocity
 * gradients, exchange ghost-layer gradients over MPI, evaluate the
 * per-region monotonic Q, then abort if any element exceeds the
 * viscosity cutoff m_qstop. */
static inline
void CalcQForElems(Real_t vnew[])
{
  //
  // MONOTONIC Q option
  //
  Index_t numElem = m_numElem ;

  if (numElem != 0) {
    /* element count including all ghost layers around the local block */
    int allElem = numElem +            /* local elem */
                  2*m_sizeX*m_sizeY +  /* plane ghosts */
                  2*m_sizeX*m_sizeZ +  /* row ghosts */
                  2*m_sizeY*m_sizeZ ;  /* col ghosts */

    /* Gradients are allocated globally now to reduce memory transfers to
       device */
    //AllocateGradients(allElem);

#if USE_MPI
    CommRecv(MSG_MONOQ, 3, m_sizeX, m_sizeY, m_sizeZ, true, true) ;
#endif

    /* Calculate velocity gradients */
    CalcMonotonicQGradientsForElems(vnew, allElem);

#if USE_MPI
    Real_t *fieldData[3] ;
    Real_t *delv_xi = m_delv_xi;
    Real_t *delv_eta = m_delv_eta;
    Real_t *delv_zeta = m_delv_zeta;

#pragma acc data present(delv_xi[allElem], \
                         delv_eta[allElem], \
                         delv_zeta[allElem])
    {
      /* bring the device-computed gradients to the host for the exchange */
#pragma acc update host(delv_xi[allElem], \
                        delv_eta[allElem], \
                        delv_zeta[allElem])

      /* Transfer veloctiy gradients in the first order elements */
      /* problem->commElements->Transfer(CommElements::monoQ) ; */
      fieldData[0] = delv_xi;
      fieldData[1] = delv_eta;
      fieldData[2] = delv_zeta;

      CommSend(MSG_MONOQ, 3, fieldData, m_sizeX, m_sizeY, m_sizeZ,
               true, true) ;

      CommMonoQ() ;
    } // end acc data
#endif

    CalcMonotonicQForElems(vnew, allElem) ;

    // Free up memory
    //DeallocateGradients();

    /* Don't allow excessive artificial viscosity */
    Index_t idx = -1;
    Index_t i;
    for (i=0; i<numElem; ++i) {
      if ( m_q[i] > m_qstop ) {
        idx = i ;
        break ;
      }
    }

    if(idx >= 0) {
#if USE_MPI
      MPI_Abort(MPI_COMM_WORLD, QStopError) ;
#else
         exit(QStopError);
#endif
      }
   }
}

/******************************************/

/* Compute new pressure from energy via the simple linear EOS
   p = (2/3) * (compression + 1) * e, then apply the pressure cutoff,
   the eosvmax volume clamp, and the pmin floor. */
static inline
void CalcPressureForElems(Real_t* p_new, Real_t* bvc,
                          Real_t* pbvc, Real_t* e_old,
                          Real_t* compression, Real_t *vnewc,
                          Real_t pmin,
                          Real_t p_cut, Real_t eosvmax,
                          Index_t length, Index_t *regElemList)
{
   /* volatile keeps numElem alive for the OpenACC present() clause. */
   volatile Index_t numElem = m_numElem;
   Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(regElemList[length], \
                                  compression[length], \
                                  pbvc[length], \
                                  p_new[length], \
                                  bvc[length], \
                                  e_old[length], \
                                  vnewc[numElem]) async(0)
#else
#pragma acc parallel loop present(regElemList[length], \
                                  compression[length], \
                                  pbvc[length], \
                                  p_new[length], \
                                  bvc[length], \
                                  e_old[length], \
                                  vnewc[numElem])
#endif
#else
#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)
#endif
   for (i = 0 ; i < length ; ++i){
      Index_t elem = regElemList[i];

      // fused loop
      Real_t c1s = (Real_t)(2.0)/(Real_t)(3.0) ;
      bvc[i] = c1s * (compression[i] + (Real_t)(1.));
      pbvc[i] = c1s;

      p_new[i] = bvc[i] * e_old[i] ;

      if (fabs(p_new[i]) < p_cut )
         p_new[i] = (Real_t)(0.0) ;

      if ( vnewc[elem] >= eosvmax ) /* impossible condition here? */
         p_new[i] = (Real_t)(0.0) ;

      if (p_new[i] < pmin)
         p_new[i] = pmin ;
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}

/******************************************/

/* Predictor/corrector energy update: advances internal energy through a
   half step (pHalfStep) and two correction passes, recomputing pressure and
   artificial viscosity (q) consistently at each stage. */
static inline
void CalcEnergyForElems(Real_t* p_new, Real_t* e_new, Real_t* q_new,
                        Real_t* bvc, Real_t* pbvc,
                        Real_t* p_old, Real_t* e_old, Real_t* q_old,
                        Real_t* compression, Real_t* compHalfStep,
                        Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin,
                        Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
                        Real_t* qq_old, Real_t* ql_old,
                        Real_t rho0,
                        Real_t eosvmax,
                        Int_t length, Index_t *regElemList)
{
   /* Scratch half-step pressure, allocated per call. */
#ifdef USE_UNIFIEDMEM
   Real_t *pHalfStep = (Real_t*) acc_create_unified(NULL, sizeof(Real_t) * (length)) ;
#else
   Real_t *pHalfStep = (Real_t*) malloc(sizeof(Real_t) * (length)) ;
#endif
   Index_t i;

#ifdef _OPENACC
   volatile Index_t numElem = m_numElem;
#pragma acc data create(pHalfStep[length])
   {
#endif

   /* Pass 1: provisional energy from old pressure/q and external work. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(e_new[length], \
                                  e_old[length], \
                                  p_old[length], \
                                  q_old[length], \
                                  delvc[length], \
                                  work[length]) async(0)
#else
#pragma acc parallel loop present(e_new[length], \
                                  e_old[length], \
                                  p_old[length], \
                                  q_old[length], \
                                  delvc[length], \
                                  work[length])
#endif
#else
#pragma omp parallel for firstprivate(length, emin)
#endif
   for (i = 0 ; i < length ; ++i) {
      e_new[i] = e_old[i] - (Real_t)(0.5) * delvc[i] * (p_old[i] + q_old[i])
         + (Real_t)(0.5) * work[i];

      if (e_new[i] < emin ) {
         e_new[i] = emin ;
      }
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

   CalcPressureForElems(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
                        pmin, p_cut, eosvmax, length, regElemList);

   /* Pass 2: half-step q from sound speed, then energy correction. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(compHalfStep[length], \
                                  pHalfStep[length], \
                                  delvc[length], \
                                  p_old[length], \
                                  q_old[length], \
                                  ql_old[length], \
                                  qq_old[length], \
                                  q_new[length], \
                                  pbvc[length], \
                                  bvc[length], \
                                  e_new[length]) async(0)
#else
#pragma acc parallel loop present(compHalfStep[length], \
                                  pHalfStep[length], \
                                  delvc[length], \
                                  p_old[length], \
                                  q_old[length], \
                                  ql_old[length], \
                                  qq_old[length], \
                                  q_new[length], \
                                  pbvc[length], \
                                  bvc[length], \
                                  e_new[length])
#endif
#else
#pragma omp parallel for firstprivate(length, rho0)
#endif
   for (i = 0 ; i < length ; ++i) {
      Real_t vhalf = (Real_t)(1.) / ((Real_t)(1.) + compHalfStep[i]) ;

      if ( delvc[i] > (Real_t)(0.) ) {
         q_new[i] /* = qq_old[i] = ql_old[i] */ = (Real_t)(0.) ;
      }
      else {
         /* ssc is the (squared, then rooted) sound speed estimate; floor it
            to keep it positive before sqrt. */
         Real_t ssc = ( pbvc[i] * e_new[i]
                 + vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;

         if ( ssc <= (Real_t)(.1111111e-36) ) {
            ssc = (Real_t)(.3333333e-18) ;
         } else {
            ssc = sqrt(ssc) ;
         }

         q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;
      }

      e_new[i] = e_new[i] + (Real_t)(0.5) * delvc[i]
         * (  (Real_t)(3.0)*(p_old[i]     + q_old[i])
            - (Real_t)(4.0)*(pHalfStep[i] + q_new[i])) ;
   }

   /* Pass 3: add remaining work, apply energy cutoff and floor. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(e_new[length], \
                                  work[length]) async(0)
#else
#pragma acc parallel loop present(e_new[length], \
                                  work[length])
#endif
#else
#pragma omp parallel for firstprivate(length, emin, e_cut)
#endif
   for (i = 0 ; i < length ; ++i) {
      e_new[i] += (Real_t)(0.5) * work[i];

      if (fabs(e_new[i]) < e_cut) {
         e_new[i] = (Real_t)(0.) ;
      }
      if ( e_new[i] < emin ) {
         e_new[i] = emin ;
      }
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

   CalcPressureForElems(p_new, bvc, pbvc, e_new, compression, vnewc,
                        pmin, p_cut, eosvmax, length, regElemList);

   /* Pass 4: corrector using full-step pressure (q_tilde), then re-apply
      cutoffs. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(regElemList[length], \
                                  pHalfStep[length], \
                                  delvc[length], \
                                  pbvc[length], \
                                  e_new[length], \
                                  bvc[length], \
                                  ql_old[length], \
                                  qq_old[length], \
                                  p_old[length], \
                                  q_old[length], \
                                  p_new[length], \
                                  q_new[length], \
                                  vnewc[numElem]) async(0)
#else
#pragma acc parallel loop present(regElemList[length], \
                                  pHalfStep[length], \
                                  delvc[length], \
                                  pbvc[length], \
                                  e_new[length], \
                                  bvc[length], \
                                  ql_old[length], \
                                  qq_old[length], \
                                  p_old[length], \
                                  q_old[length], \
                                  p_new[length], \
                                  q_new[length], \
                                  vnewc[numElem])
#endif
#else
#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)
#endif
   for (i = 0 ; i < length ; ++i){
      const Real_t sixth = (Real_t)(1.0) / (Real_t)(6.0) ;
      Index_t elem = regElemList[i];
      Real_t q_tilde ;

      if (delvc[i] > (Real_t)(0.)) {
         q_tilde = (Real_t)(0.) ;
      }
      else {
         Real_t ssc = ( pbvc[i] * e_new[i]
                 + vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;

         if ( ssc <= (Real_t)(.1111111e-36) ) {
            ssc = (Real_t)(.3333333e-18) ;
         } else {
            ssc = sqrt(ssc) ;
         }

         q_tilde = (ssc*ql_old[i] + qq_old[i]) ;
      }

      e_new[i] = e_new[i] - (  (Real_t)(7.0)*(p_old[i]     + q_old[i])
                             - (Real_t)(8.0)*(pHalfStep[i] + q_new[i])
                             + (p_new[i] + q_tilde)) * delvc[i]*sixth ;

      if (fabs(e_new[i]) < e_cut) {
         e_new[i] = (Real_t)(0.) ;
      }
      if ( e_new[i] < emin ) {
         e_new[i] = emin ;
      }
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

   CalcPressureForElems(p_new, bvc, pbvc, e_new, compression, vnewc,
                        pmin, p_cut, eosvmax, length, regElemList);

   /* Pass 5: final q for compressing elements from the final pressure. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(regElemList[length], \
                                  delvc[length], \
                                  pbvc[length], \
                                  e_new[length], \
                                  vnewc[numElem], \
                                  bvc[length], \
                                  ql_old[length], \
                                  qq_old[length], \
                                  p_new[length], \
                                  q_new[length]) async(0)
#else
#pragma acc parallel loop present(regElemList[length], \
                                  delvc[length], \
                                  pbvc[length], \
                                  e_new[length], \
                                  vnewc[numElem], \
                                  bvc[length], \
                                  ql_old[length], \
                                  qq_old[length], \
                                  p_new[length], \
                                  q_new[length])
#endif
#else
#pragma omp parallel for firstprivate(length, rho0, q_cut)
#endif
   for (i = 0 ; i < length ; ++i){
      Index_t elem = regElemList[i];

      if ( delvc[i] <= (Real_t)(0.) ) {
         Real_t ssc = ( pbvc[i] * e_new[i]
                 + vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;

         if ( ssc <= (Real_t)(.1111111e-36) ) {
            ssc = (Real_t)(.3333333e-18) ;
         } else {
            ssc = sqrt(ssc) ;
         }

         q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;

         if (fabs(q_new[i]) < q_cut) q_new[i] = (Real_t)(0.) ;
      }
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

#ifdef _OPENACC
   } // end acc data
#endif

#ifdef USE_UNIFIEDMEM
   if (pHalfStep != NULL) acc_delete_unified(pHalfStep, 0);
#else
   if (pHalfStep != NULL) free(pHalfStep);
#endif

   return ;
}

/******************************************/

/* Recompute the element sound speed from the final energy and pressure,
   scattering the result back to the global ss array via regElemList. */
static inline
void CalcSoundSpeedForElems(Real_t *ss, Real_t *vnewc, Real_t rho0,
                            Real_t *enewc, Real_t *pnewc, Real_t *pbvc,
                            Real_t *bvc, Real_t ss4o3, Index_t numElem,
                            Int_t len, Index_t *regElemList)
{
   Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnewc[numElem], \
                                  regElemList[len], \
                                  pbvc[len], \
                                  enewc[len], \
                                  bvc[len], \
                                  pnewc[len], \
                                  ss[numElem]) \
                          firstprivate(rho0, ss4o3) async(0)
#else
#pragma acc parallel loop present(vnewc[numElem], \
                                  regElemList[len], \
                                  pbvc[len], \
                                  enewc[len], \
                                  bvc[len], \
                                  pnewc[len], \
                                  ss[numElem]) \
                          firstprivate(rho0, ss4o3)
#endif
#else
#pragma omp parallel for firstprivate(rho0, ss4o3)
#endif
   for (i = 0; i < len ; ++i) {
      int elem = regElemList[i];
      Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[elem] * vnewc[elem] *
                 bvc[i] * pnewc[i]) / rho0;
      if (ssTmp <= (Real_t)(.1111111e-36)) {
         ssTmp = (Real_t)(.3333333e-18);
      }
      else {
         ssTmp = sqrt(ssTmp);
      }
      ss[elem] = ssTmp ;
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}

/******************************************/

/* Evaluate the equation of state for one region's element list, repeating
   the work `rep` times to model region-based load imbalance, then scatter
   the new p/e/q back to the global arrays and refresh sound speeds. */
static inline
void EvalEOSForElems(Real_t *vnewc, Int_t numElemReg, Index_t *regElemList,
                     Int_t rep)
{
   /* Snapshot domain-level EOS parameters into locals. */
   Real_t e_cut = m_e_cut ;
   Real_t p_cut = m_p_cut ;
   Real_t ss4o3 = m_ss4o3 ;
   Real_t q_cut = m_q_cut ;

   Real_t eosvmax = m_eosvmax ;
   Real_t eosvmin = m_eosvmin ;
   Real_t pmin = m_pmin ;
   Real_t emin = m_emin ;
   Real_t rho0 = m_refdens ;

   /* Region-sized scratch arrays (compacted copies of the region's data). */
   Real_t *e_old = m_e_old ;
   Real_t *delvc = m_delvc ;
   Real_t *p_old = m_p_old ;
   Real_t *q_old = m_q_old ;
   Real_t *compression = m_compression ;
   Real_t *compHalfStep = m_compHalfStep ;
   Real_t *qq_old = m_qq_old ;
   Real_t *ql_old = m_ql_old ;
   Real_t *work = m_work ;
   Real_t *p_new = m_p_new ;
   Real_t *e_new = m_e_new ;
   Real_t *q_new = m_q_new ;
   Real_t *bvc = m_bvc ;
   Real_t *pbvc = m_pbvc ;

   /* Global per-element arrays. */
   Real_t *e = m_e;
   Real_t *delv = m_delv;
   Real_t *p = m_p;
   Real_t *q = m_q;
   Real_t *qq = m_qq;
   Real_t *ql = m_ql;

   Index_t numElem = m_numElem;

#ifdef _OPENACC
#pragma acc data present(e_old[numElemReg], \
                         delvc[numElemReg], \
                         p_old[numElemReg], \
                         q_old[numElemReg], \
                         compression[numElemReg], \
                         compHalfStep[numElemReg], \
                         qq_old[numElemReg], \
                         ql_old[numElemReg], \
                         work[numElemReg], \
                         p_new[numElemReg], \
                         e_new[numElemReg], \
                         q_new[numElemReg], \
                         bvc[numElemReg], \
                         pbvc[numElemReg]) \
                 copyin(regElemList[numElemReg])
#endif
   { // acc data brace
   Index_t i;
   Int_t j;
   //loop to add load imbalance based on region number
   for(j = 0; j < rep; j++) {
      /* compress data, minimal set */
#ifndef _OPENACC
#pragma omp parallel
#endif
      //{ // omp parallel brace
      /* Gather this region's elements into the compacted scratch arrays. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(e_old[numElemReg], \
                                  delvc[numElemReg], \
                                  p_old[numElemReg], \
                                  q_old[numElemReg], \
                                  regElemList[numElemReg], \
                                  qq_old[numElemReg], \
                                  ql_old[numElemReg], \
                                  p[numElem], \
                                  e[numElem], \
                                  q[numElem], \
                                  delv[numElem], \
                                  qq[numElem], \
                                  ql[numElem]) async(0)
#else
#pragma acc parallel loop present(e_old[numElemReg], \
                                  delvc[numElemReg], \
                                  p_old[numElemReg], \
                                  q_old[numElemReg], \
                                  regElemList[numElemReg], \
                                  qq_old[numElemReg], \
                                  ql_old[numElemReg], \
                                  p[numElem], \
                                  e[numElem], \
                                  q[numElem], \
                                  delv[numElem], \
                                  qq[numElem], \
                                  ql[numElem])
#endif
#else
#pragma omp for nowait firstprivate(numElemReg)
#endif
      for (i=0; i<numElemReg; ++i) {
         int elem = regElemList[i];
         e_old[i] = e[elem] ;
         delvc[i] = delv[elem] ;
         p_old[i] = p[elem] ;
         q_old[i] = q[elem] ;
         qq_old[i] = qq[elem] ;
         ql_old[i] = ql[elem] ;
      }

      /* Full- and half-step compression from the new relative volumes. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnewc[numElem], \
                                  compression[numElemReg], \
                                  delvc[numElemReg], \
                                  compHalfStep[numElemReg], \
                                  regElemList[numElemReg]) async(0)
#else
#pragma acc parallel loop present(vnewc[numElem], \
                                  compression[numElemReg], \
                                  delvc[numElemReg], \
                                  compHalfStep[numElemReg], \
                                  regElemList[numElemReg])
#endif
#else
#pragma omp for
#endif
      for (i = 0; i < numElemReg ; ++i) {
         int elem = regElemList[i];
         Real_t vchalf ;
         compression[i] = (Real_t)(1.) / vnewc[elem] - (Real_t)(1.);
         vchalf = vnewc[elem] - delvc[i] * (Real_t)(.5);
         compHalfStep[i] = (Real_t)(1.) / vchalf - (Real_t)(1.);
      }

      // Fused some loops here to reduce overhead of repeatedly calling small kernels
      /* Clamp compression at the eosvmin/eosvmax volume bounds and zero the
         work array before the energy solve. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnewc[numElem], \
                                  compHalfStep[numElemReg], \
                                  compression[numElemReg], \
                                  regElemList[numElemReg], \
                                  p_old[numElemReg], \
                                  compHalfStep[numElemReg], \
                                  work[numElemReg]) async(0)
#else
#pragma acc parallel loop present(vnewc[numElem], \
                                  compHalfStep[numElemReg], \
                                  compression[numElemReg], \
                                  regElemList[numElemReg], \
                                  p_old[numElemReg], \
                                  compHalfStep[numElemReg], \
                                  work[numElemReg])
#endif
#else
#pragma omp for
#endif
      for(i = 0; i < numElemReg; ++i) {
         int elem = regElemList[i];
         if (eosvmin != 0.0 && vnewc[elem] <= eosvmin) { /* impossible due to calling func? */
            compHalfStep[i] = compression[i] ;
         }
         if (eosvmax != 0.0 && vnewc[elem] >= eosvmax) { /* impossible due to calling func? */
            p_old[i]        = (Real_t)(0.) ;
            compression[i]  = (Real_t)(0.) ;
            compHalfStep[i] = (Real_t)(0.) ;
         }
         work[i] = (Real_t)(0.) ;
      }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
      //} // end omp parallel

      CalcEnergyForElems(p_new, e_new, q_new, bvc, pbvc,
                         p_old, e_old, q_old, compression, compHalfStep,
                         vnewc, work, delvc, pmin,
                         p_cut, e_cut, q_cut, emin,
                         qq_old, ql_old, rho0, eosvmax,
                         numElemReg, regElemList);
   } // end foreach repetition

   /* Scatter the converged p/e/q back to the global element arrays. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(p_new[numElemReg], \
                                  e_new[numElemReg], \
                                  q_new[numElemReg], \
                                  p[numElem], \
                                  e[numElem], \
                                  q[numElem]) async(0)
#else
#pragma acc parallel loop present(p_new[numElemReg], \
                                  e_new[numElemReg], \
                                  q_new[numElemReg], \
                                  p[numElem], \
                                  e[numElem], \
                                  q[numElem])
#endif
#else
#pragma omp parallel for firstprivate(numElemReg)
#endif
   for (i=0; i<numElemReg; ++i) {
      int elem = regElemList[i];
      p[elem] = p_new[i] ;
      e[elem] = e_new[i] ;
      q[elem] = q_new[i] ;
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

   Real_t *ss = m_ss;
   CalcSoundSpeedForElems(ss, vnewc, rho0, e_new, p_new,
                          pbvc, bvc, ss4o3, numElem, numElemReg,
                          regElemList) ;
   } // end acc data
}

/******************************************/

/* Clamp updated relative volumes to [eosvmin, eosvmax], sanity-check the
   reference volumes, then evaluate the EOS for each region with a
   region-dependent repetition count (artificial load imbalance). */
static inline
void ApplyMaterialPropertiesForElems(Real_t vnew[])
{
   Index_t numElem = m_numElem ;
   Index_t i;

   if (numElem != 0) {
      /* Expose all of the variables needed for material evaluation */
      Real_t eosvmin = m_eosvmin ;
      Real_t eosvmax = m_eosvmax ;

#ifdef _OPENACC
#pragma acc data present(vnew[numElem])
#else
#pragma omp parallel firstprivate(numElem)
#endif
      {
      // Bound the updated relative volumes with eosvmin/max
      if (eosvmin != (Real_t)(0.)) {
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop async(0)
#else
#pragma acc parallel loop
#endif
#else
#pragma omp for
#endif
         for(i=0 ; i<numElem ; ++i) {
            if (vnew[i] < eosvmin)
               vnew[i] = eosvmin ;
         }
      }

      if (eosvmax != (Real_t)(0.)) {
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop async(0)
#else
#pragma acc parallel loop
#endif
#else
#pragma omp for nowait
#endif
         for(i=0 ; i<numElem ; ++i) {
            if (vnew[i] > eosvmax)
               vnew[i] = eosvmax ;
         }
      }

      // This check may not make perfect sense in LULESH, but
      // it's representative of something in the full code -
      // just leave it in, please
      Real_t *v = m_v;
      Real_t vc = 1.;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop reduction(min: vc) \
                          present(v[numElem]) async(0)
#else
#pragma acc parallel loop reduction(min: vc) \
                          present(v[numElem])
#endif
#else
#pragma omp for nowait private(vc) reduction(min: vc)
#endif
      for (i=0; i<numElem; ++i) {
         vc = v[i];
         if (eosvmin != (Real_t)(0.)) {
            if (vc < eosvmin)
               vc = eosvmin ;
         }
         if (eosvmax != (Real_t)(0.)) {
            if (vc > eosvmax)
               vc = eosvmax ;
         }
      }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif

      /* A non-positive reference volume is a fatal mesh error. */
      if (vc <= 0.) {
#if USE_MPI
         MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
         exit(VolumeError);
#endif
      }
      } // end acc data

      Int_t r;
      for (r=0 ; r<m_numReg ; r++) {
         int numElemReg = m_regElemSize[r];
         int *regElemList = m_regElemlist[r];
         Int_t rep;
         //Determine load imbalance for this region
         //round down the number with lowest cost
         if(r < m_numReg/2)
            rep = 1;
         //you don't get an expensive region unless you at least have 5 regions
         else if(r < (m_numReg - (m_numReg+15)/20))
            rep = 1 + m_cost;
         //very expensive regions
         else
            rep = 10 * (1+ m_cost);
         //[DEBUG by Seyong Lee] If-statement is added to invoke EvalEOSForElems()
         //only if numElemReg > 0.
if( numElemReg > 0 ) {
            EvalEOSForElems(vnew, numElemReg, regElemList, rep);
         }
      }
   }
}

/******************************************/

/* Commit the new relative volumes to v[], snapping values within v_cut of
   1.0 exactly to 1.0 to suppress round-off drift. */
static inline
void UpdateVolumesForElems(Real_t *vnew, Real_t *v,
                           Real_t v_cut, Index_t length)
{
   if (length != 0) {
      Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnew[length], \
                                  v[length]) async(0)
#else
#pragma acc parallel loop present(vnew[length], \
                                  v[length])
#endif
#else
#pragma omp parallel for firstprivate(length, v_cut)
#endif
      for(i=0 ; i<length ; ++i) {
         Real_t tmpV = vnew[i] ;

         if ( fabs(tmpV - (Real_t)(1.0)) < v_cut )
            tmpV = (Real_t)(1.0) ;

         v[i] = tmpV ;
      }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
   }
   return ;
}

/******************************************/

/* Element-centered half of the leapfrog step: kinematics, artificial
   viscosity, material EOS, then volume commit. */
static inline
void LagrangeElements(Index_t numElem)
{
   Real_t *vnew = m_vnew;  /* new relative vol -- temp */

   CalcLagrangeElements(vnew) ;

   /* Calculate Q.  (Monotonic q option requires communication) */
   CalcQForElems(vnew) ;

   ApplyMaterialPropertiesForElems(vnew) ;

   UpdateVolumesForElems(vnew, m_v, m_v_cut, numElem) ;
}

/******************************************/

/* Courant (sound-speed) timestep constraint over one region's elements.
   Each OpenMP thread reduces privately; thread 0's slot is then combined
   serially below. */
static inline
void CalcCourantConstraintForElems(Int_t length, Index_t *regElemlist,
                                   Real_t *ss, Real_t *vdov, Real_t *arealg,
                                   Real_t qqc, Real_t* dtcourant,
                                   Index_t numElem)
{
#if !defined(_OPENACC) && defined(_OPENMP)
   Index_t threads = omp_get_max_threads();
   /* Per-thread reduction buffers persist across calls. */
   static Index_t *courant_elem_per_thread;
   static Real_t *dtcourant_per_thread;
   static bool first = true;
   if (first) {
      courant_elem_per_thread = (Index_t*) calloc(threads, sizeof(Index_t));
      dtcourant_per_thread = (Real_t*) calloc(threads, sizeof(Real_t));
      first = false;
   }
#else
   Index_t threads = 1;
   Index_t courant_elem_per_thread[1];
   Real_t dtcourant_per_thread[1];
#endif
   Index_t i;

#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp parallel firstprivate(length, qqc)
#endif
   {
      Real_t qqc2 = (Real_t)(64.0) * qqc * qqc ;
      Real_t dtcourant_tmp = *dtcourant;
      Index_t courant_elem = -1 ;

#if !defined(_OPENACC) && defined(_OPENMP)
      Index_t thread_num = omp_get_thread_num();
#else
      Index_t thread_num = 0;
#endif

#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp for
#endif
      for (i = 0 ; i < length ; ++i) {
         Index_t indx = regElemlist[i] ;
         Real_t dtf = ss[indx] * ss[indx] ;

         if ( vdov[indx] < (Real_t)(0.) ) {
            dtf = dtf + qqc2 * arealg[indx] * arealg[indx]
                * vdov[indx] * vdov[indx] ;
         }

         dtf = SQRT(dtf) ;
         dtf = arealg[indx] / dtf ;

         /* Only constrain on elements whose volume is actually changing. */
         if (vdov[indx] != (Real_t)(0.)) {
            if ( dtf < dtcourant_tmp ) {
               dtcourant_tmp = dtf ;
               courant_elem = indx ;
            }
         }
      }

      dtcourant_per_thread[thread_num] = dtcourant_tmp ;
      courant_elem_per_thread[thread_num] = courant_elem ;
   }

   /* Serial min-reduction across the per-thread results. */
   for (i = 1; i < threads; ++i) {
      if (dtcourant_per_thread[i] < dtcourant_per_thread[0] ) {
         dtcourant_per_thread[0] = dtcourant_per_thread[i];
         courant_elem_per_thread[0] = courant_elem_per_thread[i];
      }
   }

   if (courant_elem_per_thread[0] != -1) {
      *dtcourant = dtcourant_per_thread[0] ;
   }

   return ;
}

/******************************************/

/* Hydro (volume-change-rate) timestep constraint over one region's
   elements, mirroring the structure of the Courant constraint above. */
static inline
void CalcHydroConstraintForElems(Int_t length, Index_t *regElemlist,
                                 Real_t *vdov, Real_t dvovmax,
                                 Real_t* dthydro, Index_t numElem)
{
   /* ACC: vdov was updated in CalcCourantConstraintForElems so we
      don't need to update it again. */
#if !defined(_OPENACC) && defined(_OPENMP)
   Index_t threads = omp_get_max_threads();
   static Index_t *hydro_elem_per_thread;
   static Real_t *dthydro_per_thread;
   static bool first = true;
   if (first) {
      hydro_elem_per_thread = (Index_t*) calloc(threads, sizeof(Index_t));
      dthydro_per_thread = (Real_t*) calloc(threads, sizeof(Real_t));
      first = false;
   }
#else
   Index_t threads = 1;
   Index_t hydro_elem_per_thread[1];
   Real_t dthydro_per_thread[1];
#endif
   Index_t i;

#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp parallel firstprivate(length, dvovmax)
#endif
   {
      Real_t dthydro_tmp = *dthydro ;
      Index_t hydro_elem = -1 ;

#if !defined(_OPENACC) && defined(_OPENMP)
      Index_t thread_num = omp_get_thread_num();
#else
      Index_t thread_num = 0;
#endif

#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp for
#endif
      for (i = 0 ; i < length ; ++i) {
         Index_t indx = regElemlist[i] ;

         if (vdov[indx] != (Real_t)(0.)) {
            /* Small epsilon guards against division by a tiny |vdov|. */
            Real_t dtdvov = dvovmax / (FABS(vdov[indx])+(Real_t)(1.e-20)) ;

            if ( dthydro_tmp > dtdvov ) {
               dthydro_tmp = dtdvov ;
               hydro_elem = indx ;
            }
         }
      }

      dthydro_per_thread[thread_num] = dthydro_tmp ;
      hydro_elem_per_thread[thread_num] = hydro_elem ;
   }

   for (i = 1; i < threads; ++i) {
      if(dthydro_per_thread[i] < dthydro_per_thread[0]) {
         dthydro_per_thread[0] = dthydro_per_thread[i];
         hydro_elem_per_thread[0] = hydro_elem_per_thread[i];
      }
   }

   if (hydro_elem_per_thread[0] != -1) {
      *dthydro = dthydro_per_thread[0] ;
   }

   return ;
}

/******************************************/

/* Evaluate both timestep constraints (Courant and hydro) over every
   region, updating the domain-level m_dtcourant / m_dthydro minima. */
static inline
void CalcTimeConstraintsForElems() {
   // Initialize conditions to a very large value
   m_dtcourant = 1.0e+20;
   m_dthydro = 1.0e+20;
   Index_t r;

   /* wait for async mem updates to finish */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif

   for (r=0 ; r < m_numReg ; ++r) {
      /* evaluate time constraint */
      CalcCourantConstraintForElems(m_regElemSize[r], m_regElemlist[r],
                                    m_ss, m_vdov, m_arealg, m_qqc,
                                    &m_dtcourant, m_numElem) ;

      /* check hydro constraint */
      CalcHydroConstraintForElems(m_regElemSize[r], m_regElemlist[r], m_vdov,
                                  m_dvovmax, &m_dthydro, m_numElem);
   }
}

/******************************************/

/* One full leapfrog timestep: nodal update, element update, optional MPI
   position/velocity synchronization, then timestep-constraint evaluation. */
static inline
void LagrangeLeapFrog()
{
   Index_t numElem = m_numElem;
#ifdef SEDOV_SYNC_POS_VEL_LATE
   Real_t *fieldData[6] ;
   /* wait for async device update to complete */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif
#endif
   //[DEBUG by Seyong Lee] Below definitions are moved out of
   //the above #ifdef macro region.
   volatile Index_t numNode = m_numNode;
   Real_t *x = m_x;
   Real_t *y = m_y;
   Real_t *z = m_z;
   Real_t *xd = m_xd;
   Real_t *yd = m_yd;
   Real_t *zd = m_zd;

   /* calculate nodal forces, accelerations, velocities, positions, with
    * applied boundary conditions and slide surface considerations */
   LagrangeNodal();

#pragma acc data present(x[numNode], \
                         y[numNode], \
                         z[numNode], \
                         xd[numNode], \
                         yd[numNode], \
                         zd[numNode])
   {

#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
   /* wait for async device update to complete (in LagrangeNodal) */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif
#endif
#ifdef SEDOV_SYNC_POS_VEL_LATE
   /* asynchronously update on host before MPI comm */
   volatile int up = 1;
#ifdef USE_UNIFIEDMEM
#pragma acc update host(x[numNode], \
                        y[numNode], \
                        z[numNode], \
                        xd[numNode], \
                        yd[numNode], \
                        zd[numNode])
#else
#pragma acc update host(x[numNode], \
                        y[numNode], \
                        z[numNode], \
                        xd[numNode], \
                        yd[numNode], \
                        zd[numNode]) \
                   async(up)
#endif
#endif
#endif

   /* calculate element quantities (i.e. velocity gradient & q), and update
    * material states */
   LagrangeElements(numElem);

   // update values for CalcTimeConstraintsForElems as early as possible
#ifdef _OPENACC
   Real_t *ss = m_ss;
   Real_t *vdov = m_vdov;
   Real_t *arealg = m_arealg;
#pragma acc data present(ss[numElem], \
                         vdov[numElem], \
                         arealg[numElem])
   {
#ifdef USE_UNIFIEDMEM
#pragma acc update host(ss[numElem], \
                        vdov[numElem], \
                        arealg[numElem])
#else
#pragma acc update host(ss[numElem], \
                        vdov[numElem], \
                        arealg[numElem]) \
                   async
#endif
   }
#endif

#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_LATE
#ifndef USE_UNIFIEDMEM
#pragma acc wait(up)
#endif
   CommRecv(MSG_SYNC_POS_VEL, 6,
            m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
            false, false) ;

   fieldData[0] = x;
   fieldData[1] = y;
   fieldData[2] = z;
   fieldData[3] = xd;
   fieldData[4] = yd;
   fieldData[5] = zd;

   CommSend(MSG_SYNC_POS_VEL, 6, fieldData,
            m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
            false, false) ;
#endif
#endif

   CalcTimeConstraintsForElems();

#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_LATE
   CommSyncPosVel() ;
#ifdef USE_UNIFIEDMEM
#pragma acc update device(x[numNode], \
                          y[numNode], \
                          z[numNode], \
                          xd[numNode], \
                          yd[numNode], \
                          zd[numNode])
#else
#pragma acc update device(x[numNode], \
                          y[numNode], \
                          z[numNode], \
                          xd[numNode], \
                          yd[numNode], \
                          zd[numNode]) \
                   async
#endif
#endif
#endif
   } // end acc data
}

/******************************************/

/* Program entry point: initializes MPI and the mesh/domain, maps all domain
   arrays onto the accelerator, runs the timestep loop, and reports results. */
int main(int argc, char *argv[])
{
   int numRanks ;
   int myRank ;
   struct cmdLineOpts opts;

#if USE_MPI
   Real_t *nodalMass;

   MPI_Init(&argc, &argv) ;
   MPI_Comm_size(MPI_COMM_WORLD, &numRanks) ;
   MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
   printf("[%s:%d] [%d/%d]\n", __FILE__, __LINE__, myRank, numRanks);
#else
   numRanks = 1;
   myRank = 0;
#endif

#if LULESH_DUMP_OUTPUT
   FILE *fp;
   int i;
#endif

   /* Set defaults that can be overridden by command line opts */
   opts.its = 9999999;
   opts.nx = 30;
   opts.numReg = 11;
   opts.numFiles = (int)(numRanks+10)/9;
   opts.showProg = 0;
   opts.quiet = 0;
   opts.viz = 0;
   opts.balance = 1;
   opts.cost = 1;

   ParseCommandLineOptions(argc, argv, myRank, &opts);

   if ((myRank == 0) && (opts.quiet == 0)) {
      printf("Running problem size %d^3 per domain until completion\n", opts.nx);
      printf("Num processors: %d\n", numRanks);
#if !defined(_OPENACC) && defined(_OPENMP)
      printf("Num threads: %d\n", omp_get_max_threads());
#endif
      printf("Total number of elements: %d\n\n", numRanks*opts.nx*opts.nx*opts.nx);
      printf("To run other sizes, use -s <integer>.\n");
      printf("To run a fixed number of iterations, use -i <integer>.\n");
      printf("To run a more or less balanced region set, use -b <integer>.\n");
      printf("To change the relative costs of regions, use -c <integer>.\n");
      printf("To print out progress, use -p\n");
      printf("To write an output file for VisIt, use -v\n");
      printf("See help (-h) for more options\n\n");
   }

   // Set up the mesh and decompose. Assumes regular cubes for now
   Int_t col, row, plane, side;
   InitMeshDecomp(numRanks, myRank, &col, &row, &plane, &side);

   // Build the main data structure and initialize it
   Domain(numRanks, col, row, plane, opts.nx,
          side, opts.numReg, opts.balance, opts.cost) ;

#if USE_MPI
   nodalMass = m_nodalMass;

   // Initial domain boundary communication
   CommRecv(MSG_COMM_SBN, 1,
            m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
            true, false) ;
   CommSend(MSG_COMM_SBN, 1, &nodalMass,
            m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
            true, false) ;
   CommSBN(1, &nodalMass) ;

   // End initialization
   MPI_Barrier(MPI_COMM_WORLD);
#endif

   // BEGIN timestep to solution */
   Real_t start;
#if USE_MPI
   start = MPI_Wtime();
#else
   start = clock();
#endif

   /* tmp region-based arrays */
   int maxRegSize = 0;
   Int_t r;
   for (r=0 ; r < m_numReg ; r++) {
      maxRegSize = MAX(maxRegSize, m_regElemSize[r]);
   }
   AllocateRegionTmps(maxRegSize);

#ifdef _OPENACC
   Index_t numElem = m_numElem;
   Index_t numElem8 = numElem * 8;
   Index_t numNode = m_numNode;
   Index_t size = m_sizeX;
   Index_t numNodeBC = (size+1)*(size+1) ;
   Index_t allElem = numElem +    /* local elem */
         2*m_sizeX*m_sizeY +      /* plane ghosts */
         2*m_sizeX*m_sizeZ +      /* row ghosts */
         2*m_sizeY*m_sizeZ ;      /* col ghosts */

   /* Local aliases for every domain array that appears in the big
      acc data directive below. */
   Real_t *fx = m_fx;
   Real_t *fy = m_fy;
   Real_t *fz = m_fz;
   // load tmp arrays
   Real_t *fx_elem = m_fx_elem;
   Real_t *fy_elem = m_fy_elem;
   Real_t *fz_elem = m_fz_elem;
   Real_t *dvdx = m_dvdx;
   Real_t *dvdy = m_dvdy;
   Real_t *dvdz = m_dvdz;
   Real_t *x8n = m_x8n;
   Real_t *y8n = m_y8n;
   Real_t *z8n = m_z8n;
   Real_t *sigxx = m_sigxx;
   Real_t *sigyy = m_sigyy;
   Real_t *sigzz = m_sigzz;
   Real_t *determ = m_determ;
   Real_t *dxx = m_dxx;
   Real_t *dyy = m_dyy;
   Real_t *dzz = m_dzz;
   Real_t *vnew = m_vnew;
   Real_t *delv_xi = m_delv_xi;
   Real_t *delv_eta = m_delv_eta;
   Real_t *delv_zeta = m_delv_zeta;
   Real_t *delx_xi = m_delx_xi;
   Real_t *delx_eta = m_delx_eta;
   Real_t *delx_zeta = m_delx_zeta;
   Real_t *e_old = m_e_old ;
   Real_t *delvc = m_delvc ;
   Real_t *p_old = m_p_old ;
   Real_t *q_old = m_q_old ;
   Real_t *compression = m_compression ;
   Real_t *compHalfStep = m_compHalfStep ;
   Real_t *qq_old = m_qq_old ;
   Real_t *ql_old = m_ql_old ;
   Real_t *work = m_work ;
   Real_t *p_new = m_p_new ;
   Real_t *e_new = m_e_new ;
   Real_t *q_new = m_q_new ;
   Real_t *bvc = m_bvc ;
   Real_t *pbvc = m_pbvc ;
   Real_t *x = m_x;
   Real_t *y = m_y;
   Real_t *z = m_z;
   Real_t *xd = m_xd;
   Real_t *yd = m_yd;
   Real_t *zd = m_zd;
   Real_t *xdd = m_xdd;
   Real_t *ydd = m_ydd;
   Real_t *zdd = m_zdd;
   Real_t *v = m_v;
   Real_t *volo = m_volo;
   Real_t *delv = m_delv;
   Real_t *vdov = m_vdov;
   Real_t *arealg = m_arealg;
#if !USE_MPI
   /* nodalMass already defined if USE_MPI */
   Real_t *nodalMass = m_nodalMass;
#endif
   Real_t *elemMass = m_elemMass;
   Real_t *ss = m_ss;
   Index_t *lxim = m_lxim;
   Index_t *lxip = m_lxip;
   Index_t *letam = m_letam;
   Index_t *letap = m_letap;
   Index_t *lzetam = m_lzetam;
   Index_t *lzetap = m_lzetap;
   Real_t *p = m_p;
   Real_t *e = m_e;
   Real_t *q = m_q;
   Real_t *qq = m_qq;
   Real_t *ql = m_ql;
   Index_t *symmX = m_symmX;
   Index_t *symmY = m_symmY;
   Index_t *symmZ = m_symmZ;
   Index_t *nodelist = m_nodelist;
   Index_t *nodeElemCount = m_nodeElemCount;
   Index_t *nodeElemStart = m_nodeElemStart;
   Index_t *nodeElemCornerList = m_nodeElemCornerList;
   Index_t *elemBC = m_elemBC;

   Index_t nCorner = nodeElemStart[numNode-1] + nodeElemCount[numNode-1];

   /* Since these are only found in pragmas they'll be optimized out -- this
      forces them to remain.  If we instead switch all of these pointers to
      volatile some crashes continue happening, so this seems to work best
      for now. */
   volatile Index_t dummyI =
      nodelist[numElem8-1] +
      nodeElemCount[numNode-1] +
      nodeElemStart[numNode-1] +
      nodeElemCornerList[nCorner-1] +
      lxim[numElem-1] +
      lxip[numElem-1] +
      letam[numElem-1] +
      letap[numElem-1] +
      lzetam[numElem-1] +
      lzetap[numElem-1] +
      elemBC[numElem-1];
   if(!m_symmXempty)
      dummyI += symmX[numNodeBC-1];
   if(!m_symmYempty)
      dummyI += symmY[numNodeBC-1];
   if(!m_symmZempty)
      dummyI += symmZ[numNodeBC-1];
   volatile Real_t dummyR =
      x[numNode-1] +
      y[numNode-1] +
      z[numNode-1] +
      xd[numNode-1] +
      yd[numNode-1] +
      zd[numNode-1] +
      xdd[numNode-1] +
      ydd[numNode-1] +
      zdd[numNode-1] +
      fx[numNode-1] +
      fy[numNode-1] +
      fz[numNode-1] +
      fx_elem[numElem8-1] +
      fy_elem[numElem8-1] +
      fz_elem[numElem8-1] +
      dvdx[numElem8-1] +
      dvdy[numElem8-1] +
      dvdz[numElem8-1] +
      x8n[numElem8-1] +
      y8n[numElem8-1] +
      z8n[numElem8-1] +
      sigxx[numElem-1] +
      sigyy[numElem-1] +
      sigzz[numElem-1] +
      dxx[numElem-1] +
      dyy[numElem-1] +
      dzz[numElem-1] +
      determ[numElem-1] +
      vnew[numElem-1] +
      delv_xi[allElem-1] +
      delv_xi[allElem-1] +
      delv_eta[allElem-1] +
      delv_zeta[allElem-1] +
      delx_xi[allElem-1] +
      delx_eta[allElem-1] +
      delx_zeta[allElem-1] +
      v[numElem-1] +
      volo[numElem-1] +
      delv[numElem-1] +
      arealg[numElem-1] +
      vdov[numElem-1] +
      ss[numElem-1] +
      p[numElem-1] +
      e[numElem-1] +
      q[numElem-1] +
      qq[numElem-1] +
      ql[numElem-1] +
      elemMass[numElem-1] +
      nodalMass[numNode-1] +
      e_old[maxRegSize-1] +
      delvc[maxRegSize-1] +
      p_old[maxRegSize-1] +
      q_old[maxRegSize-1] +
      compression[maxRegSize-1] +
      compHalfStep[maxRegSize-1] +
      qq_old[maxRegSize-1] +
      ql_old[maxRegSize-1] +
      work[maxRegSize-1] +
      p_new[maxRegSize-1] +
      e_new[maxRegSize-1] +
      q_new[maxRegSize-1] +
      bvc[maxRegSize-1] +
      pbvc[maxRegSize-1];

   if(myRank == 0) {
      printf("Copying data to device...");
      fflush(stdout);
   }

   /* Map the whole working set onto the device for the life of the run. */
#pragma acc data create(fx[numNode], \
                        fy[numNode], \
                        fz[numNode], \
                        fx_elem[numElem8], \
                        fy_elem[numElem8], \
                        fz_elem[numElem8], \
                        dvdx[numElem8], \
                        dvdy[numElem8], \
                        dvdz[numElem8], \
                        x8n[numElem8], \
                        y8n[numElem8], \
                        z8n[numElem8], \
                        sigxx[numElem], \
                        sigyy[numElem], \
                        sigzz[numElem], \
                        determ[numElem], \
                        dxx[numElem], \
                        dyy[numElem], \
                        dzz[numElem], \
                        vnew[numElem], \
                        delx_xi[allElem], \
                        delx_eta[allElem], \
                        delx_zeta[allElem], \
                        delv_xi[allElem], \
                        delv_eta[allElem], \
                        delv_zeta[allElem], \
                        e_old[maxRegSize], \
                        delvc[maxRegSize], \
                        p_old[maxRegSize], \
                        q_old[maxRegSize], \
                        compression[maxRegSize], \
                        compHalfStep[maxRegSize], \
                        qq_old[maxRegSize], \
                        ql_old[maxRegSize], \
                        work[maxRegSize], \
                        p_new[maxRegSize], \
                        e_new[maxRegSize], \
                        q_new[maxRegSize], \
                        bvc[maxRegSize], \
                        pbvc[maxRegSize]) \
                 copy(x[numNode], \
                      y[numNode], \
                      z[numNode], \
                      xd[numNode], \
                      yd[numNode], \
                      zd[numNode], \
                      p[numElem], \
                      e[numElem]) \
                 copyin(symmX[numNodeBC], \
                        symmY[numNodeBC], \
                        symmZ[numNodeBC], \
                        xdd[numNode], \
                        ydd[numNode], \
                        zdd[numNode], \
                        v[numElem], \
                        volo[numElem], \
                        delv[numElem], \
                        arealg[numElem], \
                        vdov[numElem], \
                        ss[numElem], \
                        q[numElem], \
                        qq[numElem], \
                        ql[numElem], \
                        nodalMass[numNode], \
                        elemMass[numElem], \
                        lxim[numElem], \
                        lxip[numElem], \
                        letam[numElem], \
                        letap[numElem], \
                        lzetam[numElem], \
                        lzetap[numElem], \
                        nodelist[numElem8], \
                        nodeElemCount[numNode], \
                        nodeElemStart[numNode], \
                        nodeElemCornerList[nCorner], \
                        elemBC[numElem])
#endif
   {
#ifdef _OPENACC
   if(myRank == 0) {
      printf("done.\n");
      fflush(stdout);
   }
#endif

   /* Main timestep loop: run until the stop time or iteration cap. */
   while((m_time < m_stoptime) && (m_cycle < opts.its)) {

      TimeIncrement() ;
      LagrangeLeapFrog() ;

      if ((opts.showProg != 0) && (opts.quiet == 0) && (myRank == 0)) {
         printf("cycle = %d, time = %e, dt=%e\n",
                m_cycle, (double)(m_time), (double)(m_deltatime) ) ;
      }
   }
   } // end acc data

   // Use reduced max elapsed time
   Real_t elapsed_time;
#if USE_MPI
   elapsed_time = MPI_Wtime() - start;
#else
   elapsed_time = (clock() - start) / CLOCKS_PER_SEC;
#endif
   double elapsed_timeG;
#if USE_MPI
   MPI_Reduce(&elapsed_time, &elapsed_timeG, 1, MPI_DOUBLE,
              MPI_MAX, 0, MPI_COMM_WORLD);
#else
   elapsed_timeG = elapsed_time;
#endif

#if LULESH_DUMP_OUTPUT
   fp = fopen("lulesh.dump", "w");
   for (i = 0; i < numNode; i++)
      fprintf(fp, "%.6f\n", x[i]);
   for (i = 0; i < numNode; i++)
      fprintf(fp, "%.6f\n", y[i]);
   for (i = 0; i < numNode; i++)
      fprintf(fp, "%.6f\n", z[i]);
   fclose(fp);
#endif

   if ((myRank == 0) && (opts.quiet == 0)) {
      VerifyAndWriteFinalOutput(elapsed_timeG, opts.nx, numRanks);
   }

#if USE_MPI
   MPI_Finalize() ;
#endif

   // OpenACC - release device ptrs
   ReleaseDeviceMem();

   return 0 ;
}
lloyd_parallel_partitioner.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Carlos A. Roig // #if !defined(KRATOS_LLOYD_PARALLEL_PARTITIONER_H_INCLUDED) #define KRATOS_LLOYD_PARALLEL_PARTITIONER_H_INCLUDED // System includes #include <string> #include <iostream> #include <cmath> #include <algorithm> #include <time.h> #include <stdio.h> #include <stdlib.h> // Project includes #include "mpi.h" #include "spatial_containers/tree.h" #include "spatial_containers/cell.h" // Application includes #include "custom_utilities/bins_dynamic_objects_mpi.h" #include "processes/graph_coloring_process.h" // Graph coloring #include "processes/graph_coloring_process.h" // TODO: This procedure seems unused. Maybe can be removed. int compareFunction(const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. 
*/ template<class TConfigure> class LloydParallelPartitioner { public: ///@name Type Definitions ///@{ enum { Dimension = TConfigure::Dimension }; // Point typedef TConfigure Configure; typedef typename Configure::PointType PointType; // typedef typename TConfigure::ResultNumberIteratorType ResultNumberIteratorType; // Container typedef typename Configure::PointerType PointerType; typedef typename Configure::ContainerType ContainerType; typedef typename ContainerType::iterator IteratorType; typedef typename Configure::DistanceIteratorType DistanceIteratorType; typedef typename Configure::ResultContainerType ResultContainerType; typedef typename Configure::ElementsContainerType ElementsContainerType; // typedef typename Configure::ResultPointerType ResultPointerType; typedef typename Configure::ResultIteratorType ResultIteratorType; typedef typename Configure::PointerContactType PointerContactType; // typedef typename Configure::PointerTypeIterator PointerTypeIterator; typedef GlobalPointersVector<Element> ParticleWeakVector; // Search Structures typedef Cell<Configure> CellType; typedef std::vector<CellType> CellContainerType; typedef typename CellContainerType::iterator CellContainerIterator; typedef TreeNode<Dimension, PointType, PointerType, IteratorType, typename Configure::DistanceIteratorType> TreeNodeType; typedef typename TreeNodeType::CoordinateType CoordinateType; // double typedef typename TreeNodeType::SizeType SizeType; // std::size_t typedef typename TreeNodeType::IndexType IndexType; // std::size_t typedef Tvector<IndexType,Dimension> IndexArray; typedef Tvector<SizeType,Dimension> SizeArray; typedef Tvector<CoordinateType,Dimension> CoordinateArray; ///Contact Pair typedef typename Configure::ContainerContactType ContainerContactType; typedef typename Configure::IteratorContactType IteratorContactType; ///typedef TreeNodeType LeafType; typedef typename TreeNodeType::IteratorIteratorType IteratorIteratorType; typedef typename 
TreeNodeType::SearchStructureType SearchStructureType; // Graph coloring process type typedef typename GraphColoringProcess::GraphType GraphType; /// Pointer definition of BinsObjectDynamic KRATOS_CLASS_POINTER_DEFINITION(LloydParallelPartitioner); ///@} ///@name Life Cycle ///@{ /// Default constructor. LloydParallelPartitioner(IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd) : mNumberOfObjects(ObjectsEnd-ObjectsBegin), mObjectsBegin(ObjectsBegin), mObjectsEnd(ObjectsEnd) { MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); // std::cout << "Begining partitioning" << std::endl; mpPartitionBins = new BinsObjectDynamicMpi<TConfigure>(mObjectsBegin, mObjectsEnd); mNumberOfCells = mpPartitionBins->GetCellContainer().size(); if(mNumberOfCells < mpi_size) { KRATOS_ERROR << "Error: Number of cells in the bins must be at least equal to mpi_size. " << mNumberOfCells << std::endl; } if(mNumberOfCells % mpi_size) { // KRATOS_WARNING << "Warning: Number of cells is not multiple of mpi_size. Heavy imbalance may occur." << std::endl; std::cout << "Warning: Number of cells is not multiple of mpi_size. Heavy imbalance may occur. " << mNumberOfCells << std::endl; } if(mNumberOfCells < 10 * mpi_size) { // KRATOS_WARNING << "Warning: Number of cells is small. Partition Shape may be sub-optimal." << std::endl; std::cout << "Warning: Number of cells is small. Partition Shape may be sub-optimal. 
" << mNumberOfCells << std::endl; } } double ReduceMaxRadius(IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd) { // Max Radius Ugly fix double local_max_radius = 0.0f; double max_radius = 0.0f; for (IteratorType ObjectItr = ObjectsBegin; ObjectItr != ObjectsEnd; ObjectItr++) { const double Radius = TConfigure::GetObjectRadius(*ObjectItr, 0.0f); if(Radius > local_max_radius) local_max_radius = Radius; } MPI_Allreduce(&local_max_radius, &max_radius, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); return max_radius; } void SerialPartition() { std::vector<int> mpiSendObjectsPerCell(mNumberOfCells, 0); std::vector<int> mpiRecvObjectsPerCell(mNumberOfCells, 0); std::vector<int> CellPartition(mNumberOfCells, 0); std::vector<int> ObjectsPerPartition(mpi_size, 0); int mpiSendNumberOfObjects = mObjectsEnd - mObjectsBegin; int mpiRecvNumberOfObjects = 0; PointType ObjectCenter; PointType Low, High; SearchStructureType Box; // Calculate objects per cell for(std::size_t i = 0; i < (std::size_t)mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto cellId = mpPartitionBins->CalculateIndex(ObjectCenter); mpiSendObjectsPerCell[cellId]++; } MPI_Allreduce(&mpiSendNumberOfObjects, &mpiRecvNumberOfObjects, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&mpiSendObjectsPerCell[0], &mpiRecvObjectsPerCell[0], mNumberOfCells, MPI_INT, MPI_SUM, MPI_COMM_WORLD); //int MeanObjectsPerPartition = mpiRecvNumberOfObjects / mpi_size; // std::cout << "mpiRecvNumberOfObjects: " << mpiRecvNumberOfObjects << " MeanObjectsPerPartition: " << MeanObjectsPerPartition << std::endl; // Assing each cell to the closest partition center // TODO: this is currently very unbalanced for(std::size_t cellId = 0; cellId < (std::size_t) mNumberOfCells; cellId++) { ObjectsPerPartition[cellId] += mpiRecvObjectsPerCell[cellId]; CellPartition[cellId] = cellId; } std::cout << "Partititon " << mpi_rank << ": " << ObjectsPerPartition[mpi_rank] << 
std::endl; // Assign the partition to the objects based on their cell for(std::size_t i = 0; i < (std::size_t) mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto cellId = mpPartitionBins->CalculateIndex(ObjectCenter); (*ObjectItr)->GetValue(PARTITION_INDEX) = CellPartition[cellId]; for (unsigned int j = 0; j < (*ObjectItr)->GetGeometry().PointsNumber(); j++) { ModelPart::NodeType::Pointer NodePtr = (*ObjectItr)->GetGeometry().pGetPoint(j); NodePtr->FastGetSolutionStepValue(PARTITION_INDEX) = CellPartition[cellId]; } } // std::cout << "Ending partitioning" << std::endl; } void VoronoiiPartition() { std::vector<int> mpiSendObjectsPerCell(mNumberOfCells, 0); std::vector<int> mpiRecvObjectsPerCell(mNumberOfCells, 0); std::vector<int> CellPartition(mNumberOfCells, 0); std::vector<double> CellDistances(mNumberOfCells, std::numeric_limits<double>::max()); std::vector<double> mpiSendCellCenter(mNumberOfCells * Dimension, 0.0f); std::vector<double> mpiRecvCellCenter(mNumberOfCells * Dimension, 0.0f); std::vector<int> CellsPerPartition(mpi_size, 0); std::vector<int> ObjectsPerPartition(mpi_size, 0); std::vector<PointType> PartitionCenters(mpi_size); std::vector<double> mpiSendPartCenter(mpi_size * Dimension, 0.0f); std::vector<double> mpiRecvPartCenter(mpi_size * Dimension, 0.0f); std::vector<int> mpiSendPartNum(mpi_size, 0); std::vector<int> mpiRecvPartNum(mpi_size, 0); PointType ObjectCenter; // 1 - Calculate the centers of the cells based on the objects inside // TODO: Parallelize this (non-trivial) for(std::size_t i = 0; i < mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto CellIndex = mpPartitionBins->CalculateIndex(ObjectCenter); mpiSendObjectsPerCell[CellIndex]++; for(int d = 0; d < Dimension; d++) { mpiSendCellCenter[CellIndex*Dimension+d] += ObjectCenter[d]; } } // 1.1 - Communicate the number of objects per cell and the local 
sum of object coordinates MPI_Allreduce(&mpiSendObjectsPerCell[0], &mpiRecvObjectsPerCell[0], mNumberOfCells * Dimension, MPI_INT, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&mpiSendCellCenter[0], &mpiRecvCellCenter[0], mNumberOfCells * Dimension, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); // 1.2 - Obtain the wheighted center of each cell with the data of all processes #pragma omp parallel for for(std::size_t cellId = 0; cellId < mNumberOfCells; cellId++) { for(int d = 0; d < Dimension; d++) { mpiRecvCellCenter[cellId*Dimension+d] /= mpiRecvObjectsPerCell[cellId]; } } // 2 - Assign a random origin to each partition. auto minPoint = mpPartitionBins->GetMinPoint(); auto maxPoint = mpPartitionBins->GetMaxPoint(); auto boxSize = maxPoint - minPoint; // Change this if we want real random // !!!!!MAKE SURE THIS IS THE SAME ON EVERY PARTITION OR IT WON'T WORK!!!!! std::srand(256); for(int i = 0; i < mpi_size; i++) { for(int d = 0; d < Dimension; d++) { PartitionCenters[i][d] = minPoint[d] + ((double)std::rand() / (double)RAND_MAX) * boxSize[d]; } } // While not converged auto MaxIterations = 1e1; for(std::size_t iterations = 0; iterations < MaxIterations; iterations++ ) { // Assing each cell to the closest partition center for(std::size_t cellId = 0; cellId < mNumberOfCells; cellId++) { if(mpiRecvObjectsPerCell[cellId] != 0) { for(int i = 0; i < mpi_size; i++) { double cubeDistance = 0.0f; for(int d = 0; d < Dimension; d++) { // Manhattan distance shoudl prevent problems with the discretization of the space cubeDistance += std::abs(mpiRecvCellCenter[cellId*Dimension+d] - PartitionCenters[i][d]); } if(cubeDistance < CellDistances[cellId]) { CellDistances[cellId] = cubeDistance; CellPartition[cellId] = i; } } } } // At this point no synch should be needed // Update the center of the partitions for(int i = 0; i < mpi_size; i++) { CellsPerPartition[i] = 0; ObjectsPerPartition[i] = 0; for(int d = 0; d < Dimension; d++) { PartitionCenters[i][d] = 0.0f; } } // if(mpi_rank == 0) { // 
std::cout << mNumberOfCells << std::endl; // } for(std::size_t cellId = 0; cellId < mNumberOfCells; cellId++) { if(mpiRecvObjectsPerCell[cellId] != 0) { CellsPerPartition[CellPartition[cellId]]++; ObjectsPerPartition[CellPartition[cellId]] += mpiRecvObjectsPerCell[cellId]; for(int d = 0; d < Dimension; d++) { PartitionCenters[CellPartition[cellId]][d] += mpiRecvCellCenter[cellId*Dimension+d]; } } // if(mpi_rank == 0) { // for(int i = 0; i < mpi_size; i++) { // std::cout << "Iteration: " << cellId << " Partition " << i << " has " << CellsPerPartition[i] << " Cells" << std::endl; // } // } } for(std::size_t partId = 0; partId < mpi_size; partId++) { for(int d = 0; d < Dimension; d++) { PartitionCenters[partId][d] /= CellsPerPartition[partId]++; } } } if(mpi_rank == 0) { std::cout << mNumberOfCells << std::endl; for(int i = 0; i < mpi_size; i++) { std::cout << "Partition " << i << " has " << CellsPerPartition[i] << " Cells" << std::endl; } } // Assign the partition to the objects based on their cell for(std::size_t i = 0; i < mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateCenter(*ObjectItr, ObjectCenter); auto CellIndex = mpPartitionBins->CalculateIndex(ObjectCenter); (*ObjectItr)->GetValue(PARTITION_INDEX) = CellPartition[CellIndex]; for (unsigned int i = 0; i < (*ObjectItr)->GetGeometry().PointsNumber(); i++) { ModelPart::NodeType::Pointer NodePtr = (*ObjectItr)->GetGeometry().pGetPoint(i); NodePtr->FastGetSolutionStepValue(PARTITION_INDEX) = CellPartition[CellIndex]; } } std::cout << "Ending partitioning" << std::endl; } void UpdateDomainGraph(IteratorType const& ObjectsBegin, IteratorType const& ObjectsEnd, GraphType & domainGraph) { PointType ObjectCenter; PointType Low, High; SearchStructureType Box; mObjectsBegin = ObjectsBegin; mObjectsEnd = ObjectsEnd; mNumberOfObjects = ObjectsEnd-ObjectsBegin; // Rebuild the bins free(mpPartitionBins); mpPartitionBins = new BinsObjectDynamicMpi<TConfigure>(mObjectsBegin, mObjectsEnd); // 
Assign the partition to the objects based on their cell double maxRadius = ReduceMaxRadius(mObjectsBegin, mObjectsEnd); for(std::size_t i = 0; i < (std::size_t) mNumberOfObjects; i++) { auto ObjectItr = mObjectsBegin + i; TConfigure::CalculateBoundingBox(*ObjectItr, Low, High); for(int i = 0; i < Dimension; i++) { Low[i] -= maxRadius; High[i] += maxRadius; } Box.Set( mpPartitionBins->CalculateCell(Low), mpPartitionBins->CalculateCell(High), mpPartitionBins->GetDivisions()); std::unordered_set<std::size_t> partitionSet; auto ObjectRadius = TConfigure::GetObjectRadius(*ObjectItr, 0.0f); mpPartitionBins->SearchPartitionInRadius(Box, *ObjectItr, partitionSet, ObjectRadius); std::vector<std::size_t> partitionList(partitionSet.begin(), partitionSet.end()); for(unsigned int i = 0; i < partitionList.size(); i++) { domainGraph(mpi_rank, mpi_rank) = 1; domainGraph(partitionList[i], mpi_rank) = 1; domainGraph(mpi_rank, partitionList[i]) = 1; } } } /// Destructor. virtual ~LloydParallelPartitioner() { delete mpPartitionBins; } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name MPI Variables ///@{ int mpi_rank; int mpi_size; int mNumberOfObjects; int mNumberOfCells; IteratorType mObjectsBegin; IteratorType mObjectsEnd; BinsObjectDynamicMpi<TConfigure> * mpPartitionBins; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void CreatePartition(SizeType number_of_threads, const SizeType number_of_rows, std::vector<SizeType>& partitions) { partitions.resize(number_of_threads+1); SizeType partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(SizeType i = 1; i<number_of_threads; i++) { partitions[i] = partitions[i-1] + partition_size; } } ///@} ///@name Private Access 
///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} public: /// Assignment operator. LloydParallelPartitioner<TConfigure> & operator=(const LloydParallelPartitioner<TConfigure> & rOther) { mObjectsBegin = rOther.mObjectsBegin; mObjectsEnd = rOther.mObjectsEnd; mpPartitionBins = rOther.mpPartitionBins; return *this; } /// Copy constructor. LloydParallelPartitioner(const LloydParallelPartitioner& rOther) { *this = rOther; } }; // Class BinsObjectDynamic ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TConfigure> inline std::istream& operator >> (std::istream& rIStream, LloydParallelPartitioner<TConfigure>& rThis) { return rIStream; } /// output stream function template<class TConfigure> inline std::ostream& operator << (std::ostream& rOStream, const LloydParallelPartitioner<TConfigure> & rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_LLOYD_PARALLEL_PARTITIONER_H_INCLUDED defined
gt.filter.c
/* * PROJECT: GEM-Tools library * FILE: gt.filter.c * DATE: 02/08/2012 * AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com> * DESCRIPTION: Application to filter {MAP,SAM,FASTQ} files and output the filtered result */ #ifdef HAVE_OPENMP #include <omp.h> #endif #include "gem_tools.h" #define GT_FILTER_FLOAT_NO_VALUE (-1.0) #define gt_filter_cond_fatal_error_msg(condition,error_msg,args...) \ gt_cond_fatal_error_msg(condition,error_msg ". File '%s', line %"PRIu64"\n",##args, \ parameters.name_input_file,__buffered_input->current_line_num-1) #define gt_filter_fatal_error_msg(error_msg,args...) \ gt_fatal_error_msg(error_msg ". File '%s', line %"PRIu64"\n",##args, \ parameters.name_input_file,__buffered_input->current_line_num-1) typedef struct { uint64_t min; uint64_t max; } gt_filter_quality_range; typedef struct { /* I/O */ char* name_input_file; char* name_output_file; char* name_reference_file; char* name_gem_index_file; char* annotation; gt_gtf* gtf; bool mmap_input; bool paired_end; bool no_output; gt_file_format output_format; bool discarded_output; bool check_duplicates; char* name_discarded_output_file; gt_file_format discarded_output_format; /* Filter Read/Qualities */ bool hard_trim; uint64_t left_trim; uint64_t right_trim; bool restore_trim; bool uniform_read; bool uniform_read_strict; bool qualities_to_offset_33; bool qualities_to_offset_64; bool remove_qualities; bool add_qualities; /* Filter Template/Alignments */ bool mapped; bool unmapped; int64_t unique_level; float min_length; float max_length; int64_t min_maps; int64_t max_maps; float max_strata_after_map; /* Make templates unique */ int64_t reduce_to_unique_strata; int64_t reduce_by_quality; bool reduce_to_pairs; uint64_t reduce_to_unique; bool reduce_by_gene_id; bool reduce_to_protein_coding; /* RNA Seq to recalculate counters */ bool reduce_by_junctions; bool no_split_maps; bool only_split_maps; bool no_penalty_for_splitmaps; uint64_t min_intron_length; uint64_t min_block_length; /* Filter 
SE-Maps */ bool first_map; bool keep_first_map; bool keep_unique; bool matches_pruning; uint64_t max_decoded_matches; uint64_t min_decoded_strata; uint64_t max_output_matches; uint64_t max_input_matches; bool make_counters; bool only_unmapped; bool only_mapped; float min_event_distance; float max_event_distance; float min_levenshtein_distance; float max_levenshtein_distance; gt_vector* map_ids; gt_shash* gtf_types; bool filter_by_strand_se; bool allow_strand_r; bool allow_strand_f; gt_vector* quality_score_ranges; /* (gt_filter_quality_range) */ /* Filter PE-Maps */ int64_t max_inss; int64_t min_inss; bool filter_by_strand_pe; bool allow_strand_rf; bool allow_strand_fr; bool allow_strand_ff; bool allow_strand_rr; /* Filter-Realign */ bool mismatch_recovery; bool realign_hamming; bool realign_levenshtein; /* Checking/Report */ bool check; bool check_format; gt_file_format check_file_format; /* Hidden */ bool special_functionality; bool error_plot; // Print error distribution (depreciated) bool insert_size_plot; // Print insert size distribution (depreciated) bool show_sequence_list; // Display sequence list in the GEMindex/.fa... 
bool display_pretty; // Display pretty printed map(s) bool group_reads; // Group previously split reads bool sample_read; // Sample the read in chunks (annotated by chunk group) float split_chunk_size; float split_step_size; float split_left_trim; float split_right_trim; float split_min_remainder; /* Misc */ uint64_t num_threads; bool verbose; /* Control flags */ bool perform_dna_map_filter; // Any DNA-filtering criteria activated bool perform_rna_map_filter; // Any RNA-filtering criteria activated bool perform_annotation_filter; // Any annotation based filtering criteria activated bool load_index; } gt_filter_args; gt_filter_args parameters = { /* I/O */ .name_input_file=NULL, .name_output_file=NULL, .name_reference_file=NULL, .name_gem_index_file=NULL, .annotation = NULL, .gtf = NULL, .mmap_input=false, .paired_end=false, .no_output=false, .output_format=FILE_FORMAT_UNKNOWN, .discarded_output = false, .name_discarded_output_file=NULL, .discarded_output_format=FILE_FORMAT_UNKNOWN, .check_duplicates=false, /* Filter Read/Qualities */ .hard_trim=false, .left_trim=0, .right_trim=0, .restore_trim=false, .uniform_read=false, .uniform_read_strict=false, .qualities_to_offset_33=false, .qualities_to_offset_64=false, .remove_qualities=false, .add_qualities=false, /* Filter Template/Alignments */ .mapped=false, .unmapped=false, .unique_level=-1, .min_length=-1.0, .max_length=-1.0, .min_maps=-1, .max_strata_after_map=-1.0, .max_maps=-1, /* Make templates unique */ .reduce_to_unique_strata=-1, .reduce_by_gene_id=false, .reduce_by_junctions=false, .reduce_to_protein_coding=false, .reduce_to_unique=UINT64_MAX, .reduce_to_pairs=false, .reduce_by_quality=-1, /* RNA Seq */ .no_split_maps=false, .only_split_maps=false, .no_penalty_for_splitmaps=false, .min_intron_length=0, .min_block_length=0, /* Filter SE-Maps */ .first_map=false, .keep_first_map=false, .keep_unique=false, .matches_pruning=false, .max_decoded_matches=GT_ALL, .min_decoded_strata=0, .max_output_matches=GT_ALL, 
.max_input_matches=GT_ALL, .make_counters=false, .only_unmapped=false, .only_mapped=false, .min_event_distance=GT_FILTER_FLOAT_NO_VALUE, .max_event_distance=GT_FILTER_FLOAT_NO_VALUE, .min_levenshtein_distance=GT_FILTER_FLOAT_NO_VALUE, .max_levenshtein_distance=GT_FILTER_FLOAT_NO_VALUE, .map_ids=NULL, .gtf_types=NULL, .filter_by_strand_se=false, .allow_strand_r=false, .allow_strand_f=false, .quality_score_ranges = NULL, /* Filter PE-Maps */ .max_inss=INT64_MAX, .min_inss=INT64_MIN, .filter_by_strand_pe=false, .allow_strand_rf=false, .allow_strand_fr=false, .allow_strand_ff=false, .allow_strand_rr=false, /* Filter-Realign */ .mismatch_recovery=false, .realign_hamming=false, .realign_levenshtein=false, /* Checking/Report */ .check = false, .check_format = false, /* Hidden */ .special_functionality = false, .error_plot = false, .insert_size_plot = false, .show_sequence_list = false, .display_pretty = false, .group_reads = false, .sample_read = false, .split_chunk_size = -1.0, .split_step_size = -1.0, .split_left_trim = -1.0, .split_right_trim = -1.0, .split_min_remainder = 0.0, /* Misc */ .num_threads=1, .verbose=false, /* Control flags */ .perform_dna_map_filter=false, .perform_rna_map_filter=false, .perform_annotation_filter=false, .load_index=false }; /* * Helper to get num maps correctly also for unpaired * mapped pairs */ GT_INLINE uint64_t gt_filter_get_num_maps(gt_template* template){ GT_TEMPLATE_IF_SE_ALINGMENT(template) { return gt_template_get_num_mmaps(template); } else { if (!gt_template_is_mapped(template)) { GT_TEMPLATE_REDUCE_BOTH_ENDS(template,alignment_end1,alignment_end2); return gt_alignment_get_num_maps(alignment_end1) + gt_alignment_get_num_maps(alignment_end2); } else { return gt_template_get_num_mmaps(template); } } } /* * Checking/(Re)Aligning/MismsRecovery */ GT_INLINE void gt_filter_mismatch_recovery_maps( char* const name_input_file,const uint64_t current_line_num, gt_template* const template,gt_sequence_archive* const sequence_archive) { // 
Unfolded as to report errors in the recovery gt_status error_code; uint64_t alignment_pos = 0; GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { uint64_t map_pos = 0; GT_ALIGNMENT_ITERATE(alignment,map) { if ((error_code=gt_map_recover_mismatches_sa(map,alignment->read,sequence_archive))) { gt_error_msg("Unrecoverable Alignment '%s':%"PRIu64"\n\tREAD::'"PRIgts"':%"PRIu64":%"PRIu64" ", name_input_file,current_line_num,PRIgts_content(template->tag),alignment_pos,map_pos); gt_output_map_fprint_map_pretty_sa(stdout,map,alignment->read,sequence_archive); } ++map_pos; } gt_alignment_recalculate_counters(alignment); ++alignment_pos; } if (gt_template_get_num_blocks(template)>1) gt_template_recalculate_counters(template); } GT_INLINE bool gt_filter_check_maps( char* const name_input_file,const uint64_t current_line_num, gt_template* const template,gt_sequence_archive* const sequence_archive, uint64_t* const total_algs_checked,uint64_t* const total_algs_correct, uint64_t* const total_maps_checked,uint64_t* const total_maps_correct) { bool alignment_correct=true; gt_status error_code; uint64_t alignment_pos = 0; GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { uint64_t map_pos = 0; GT_ALIGNMENT_ITERATE(alignment,map) { if ((error_code=gt_map_check_alignment_sa(map,alignment->read,sequence_archive))) { gt_error_msg("Wrong Alignment '%s':%"PRIu64"\n\tREAD::'"PRIgts"':%"PRIu64":%"PRIu64" ", name_input_file,current_line_num,PRIgts_content(template->tag),alignment_pos,map_pos); gt_output_map_fprint_map_pretty_sa(stdout,map,alignment->read,sequence_archive); alignment_correct = false; } else { ++(*total_maps_correct); } ++(*total_maps_checked); ++map_pos; } ++alignment_pos; } ++(*total_algs_checked); if (alignment_correct) { ++(*total_algs_correct); return true; } else { return false; } } /* * Filtering MAPs functions */ void gt_filter_delete_map_ids(gt_vector* filter_map_ids) { // Free vector if (filter_map_ids!=NULL) { GT_VECTOR_ITERATE(filter_map_ids,map_id,pos,gt_string*) 
{ gt_string_delete(*map_id); } gt_vector_delete(filter_map_ids); } } GT_INLINE bool gt_filter_is_sequence_name_allowed(gt_string* const seq_name) { GT_VECTOR_ITERATE(parameters.map_ids,map_id,pos,gt_string*) { if (gt_string_equals(seq_name,*map_id)) return true; } return false; } GT_INLINE bool gt_filter_is_quality_value_allowed(const uint64_t quality_score) { GT_VECTOR_ITERATE(parameters.quality_score_ranges,quality_range,pos,gt_filter_quality_range) { if (quality_score >= quality_range->min && quality_score <= quality_range->max) return true; } return false; } GT_INLINE void gt_filter_prune_matches(gt_template* const template) { uint64_t max_num_matches = GT_ALL; if (parameters.max_decoded_matches!=GT_ALL || parameters.min_decoded_strata!=0) { uint64_t max_strata; gt_counters_calculate_num_maps(gt_template_get_counters_vector(template), parameters.min_decoded_strata,parameters.max_decoded_matches,&max_strata,&max_num_matches); } if (parameters.max_output_matches!=GT_ALL) { max_num_matches = GT_MIN(max_num_matches,parameters.max_output_matches); } // Reduce matches if (max_num_matches < GT_ALL) { gt_template_reduce_mmaps(template,max_num_matches); } } GT_INLINE bool gt_filter_has_junction(gt_map* const map,const uint64_t start,const uint64_t end) { GT_MAP_ITERATE(map,map_block) { if (gt_map_has_next_block(map_block)) { const bool forward = (gt_map_get_strand(map_block) == FORWARD); // Is the junction in the overlap ? const uint64_t junctions_start = gt_map_get_end_mapping_position(forward ? map_block: gt_map_get_next_block(map_block)) + 1; const uint64_t junctions_end = gt_map_get_begin_mapping_position(forward ? 
gt_map_get_next_block(map_block): map_block) - 1; if (junctions_start == start && junctions_end == end) return true; } } return false; } GT_INLINE uint64_t gt_filter_count_junctions_in_region(gt_map* const map,const uint64_t start,const uint64_t end) { uint64_t count = 0; GT_MAP_ITERATE(map,map_block) { if (gt_map_has_next_block(map_block)) { const bool forward = (gt_map_get_strand(map_block) == FORWARD); // Is the junction in the overlap ? const uint64_t junctions_start = gt_map_get_end_mapping_position(forward ? map_block: gt_map_get_next_block(map_block)) + 1; const uint64_t junctions_end = gt_map_get_begin_mapping_position(forward ? gt_map_get_next_block(map_block): map_block) - 1; if ((junctions_end >= start) && (junctions_start <= end)){ count++; } } } return count; } GT_INLINE bool gt_filter_are_overlapping_pairs_coherent(gt_map** const mmap) { if (!gt_map_has_next_block(mmap[0]) && !gt_map_has_next_block(mmap[1])) return true; // Check overlap uint64_t overlap_start, overlap_end; if (gt_map_block_overlap(mmap[0],mmap[1],&overlap_start,&overlap_end)) { uint64_t junctions_in_1 = gt_filter_count_junctions_in_region(mmap[0], overlap_start, overlap_end); uint64_t junctions_in_2 = gt_filter_count_junctions_in_region(mmap[1], overlap_start, overlap_end); if(junctions_in_1 != junctions_in_2) return false; GT_MAP_ITERATE(mmap[0],map_block) { if (gt_map_has_next_block(map_block)) { const bool forward = (gt_map_get_strand(map_block) == FORWARD); // Is the junction in the overlap ? const uint64_t junctions_start = gt_map_get_end_mapping_position(forward ? map_block: gt_map_get_next_block(map_block)) + 1; const uint64_t junctions_end = gt_map_get_begin_mapping_position(forward ? 
gt_map_get_next_block(map_block): map_block) - 1; // Find the junctions start in the other map if (junctions_start >= overlap_start && junctions_start < overlap_end && !gt_filter_has_junction(mmap[1],junctions_start,junctions_end)) { return false; // Start not found, not overlapping split maps } } } } return true; } GT_INLINE void gt_filter_add_from_hit(gt_template* const template,gt_gtf_hit* hit, uint64_t target_block) { if (hit->mmap != NULL) { // add PE gt_map** mmap_copy = gt_mmap_array_copy(hit->mmap, hit->num_template_blocks); gt_template_insert_mmap(template,mmap_copy,hit->map_attributes, parameters.check_duplicates); free(mmap_copy); } else if(hit->map != NULL) { if(target_block > 0){ GT_TEMPLATE_REDUCE_BOTH_ENDS(template,alignment_1, alignment_2); if(target_block == 1){ gt_alignment_insert_map(alignment_1,gt_map_copy(hit->map), parameters.check_duplicates); }else{ gt_alignment_insert_map(alignment_2,gt_map_copy(hit->map), parameters.check_duplicates); } }else{ GT_TEMPLATE_REDUCTION(template,alignment_dst); gt_alignment_insert_map(alignment_dst,gt_map_copy(hit->map), parameters.check_duplicates); } } } GT_INLINE bool gt_filter_make_reduce_by_annotation_alignment(gt_template* const template_dst,gt_alignment* const alignment, uint64_t block, gt_gtf_hits* hits) { bool filtered = false; gt_gtf_search_alignment_hits(parameters.gtf, hits, alignment); bool prot_coding = (parameters.reduce_to_protein_coding && hits->num_protein_coding >= 1); bool gene_id = (parameters.reduce_by_gene_id && hits->num_paired_genes >= 1); bool junction_hits = (parameters.reduce_by_junctions && hits->junction_hit_ration > 0.0); if(gene_id || prot_coding){ GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){ gt_gtf_hit* hit = *e; if(junction_hits){ double junction_ratio = hit->num_junctions == 0 ? 
-1.0 : (double)hit->num_junctions_hits/(double)hit->num_junctions; if(junction_ratio > 0.0 && junction_ratio != hits->junction_hit_ration) continue; } if(gene_id && !hit->pairs_gene)continue; if(prot_coding && !hit->is_protein_coding)continue; filtered = true; gt_filter_add_from_hit(template_dst, hit, block); } } return filtered; } GT_INLINE bool gt_filter_make_reduce_by_annotation(gt_template* const template_dst,gt_template* const template_src) { bool filtered = false; GT_TEMPLATE_IF_SE_ALINGMENT(template_src) { GT_TEMPLATE_REDUCTION(template_src,alignment_src); gt_gtf_hits* hits = gt_gtf_hits_new(); filtered = gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_src, 0, hits); gt_gtf_hits_delete(hits); return filtered; } else { if (!gt_template_is_mapped(template_src)) { GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_end1,alignment_end2); gt_gtf_hits* hits = gt_gtf_hits_new(); filtered = gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_end1, 1, hits); if(!filtered){ // add all as we want to preserve them in case second alignment is filtered. 
GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){ gt_filter_add_from_hit(template_dst, *e, 1); } } gt_gtf_hits_clear(hits); if(gt_filter_make_reduce_by_annotation_alignment(template_dst, alignment_end2, 2, hits)){ filtered = true; }else if(filtered){ // alignment 1 was filtered, so we have to copy all from alignment 2 GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){ gt_filter_add_from_hit(template_dst, *e, 2); } } gt_gtf_hits_delete(hits); return filtered; } else { gt_gtf_hits* hits = gt_gtf_hits_new(); gt_gtf_search_template_hits(parameters.gtf, hits, template_src); bool prot_coding = (parameters.reduce_to_protein_coding && hits->num_protein_coding >= 1); bool gene_id = (parameters.reduce_by_gene_id && hits->num_paired_genes >= 1); bool junction_hits = (parameters.reduce_by_junctions && hits->junction_hit_ration > 0.0); if(gene_id || prot_coding || junction_hits){ GT_VECTOR_ITERATE(hits->exon_hits, e, c, gt_gtf_hit*){ gt_gtf_hit* hit = *e; if(junction_hits){ double junction_ratio = hit->num_junctions == 0 ? 
-1.0 : (double)hit->num_junctions_hits/(double)hit->num_junctions; if(junction_ratio > 0.0 && junction_ratio != hits->junction_hit_ration)continue; } if(gene_id && !hit->pairs_gene)continue; if(prot_coding && !hit->is_protein_coding)continue; filtered = true; gt_filter_add_from_hit(template_dst, hit, 0); } } gt_gtf_hits_delete(hits); } } return filtered; } void gt_alignment_reduction_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) { // Reduction by unique level (can be calculated beforehand) GT_ALIGNMENT_ITERATE(alignment_src,map) { if (parameters.reduce_to_unique_strata >= 0 && (gt_alignment_get_uniq_degree(alignment_src) >= parameters.reduce_to_unique_strata)) { gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates); break; } if(gt_alignment_get_num_maps(alignment_src) > parameters.reduce_to_unique) break; gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates); } } void gt_alignment_dna_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) { const uint64_t first_matching_distance = gt_counters_get_min_matching_strata(gt_alignment_get_counters_vector(alignment_src)) - 1; const uint64_t max_mismatch_quality = gt_alignment_get_max_mismatch_quality(alignment_src); // Reduction by unique level (can be calculated beforehand) bool pick_only_first_map = false; /* * (1) Pre-filtering steps */ gt_map* first_map = NULL; if (parameters.keep_first_map && gt_alignment_get_num_maps(alignment_src)>0) { first_map = gt_map_copy(gt_alignment_get_map(alignment_src,0)); } /* * (2) Filtering of maps */ GT_ALIGNMENT_ITERATE(alignment_src,map) { // Check sequence name if (parameters.map_ids!=NULL) { if (!gt_filter_is_sequence_name_allowed(map->seq_name)) continue; } // Filter strata beyond first mapping const int64_t current_stratum = parameters.no_penalty_for_splitmaps ? 
      gt_map_get_no_split_distance(map) : gt_map_get_global_distance(map);
    // Stop scanning entirely (break, not continue) once past the allowed strata
    // window after the first matching stratum — NOTE(review): assumes maps are
    // iterated in increasing distance order; confirm upstream sorting.
    if (parameters.max_strata_after_map >= 0.0 && (current_stratum-first_matching_distance) > gt_alignment_get_read_proportion(alignment_src,parameters.max_strata_after_map)) break;
    // Check strata (event distance; min/max are proportions of the read length)
    if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
      const uint64_t total_distance = parameters.no_penalty_for_splitmaps ? gt_map_get_no_split_distance(map) : gt_map_get_global_distance(map);
      if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance < gt_alignment_get_read_proportion(alignment_src,parameters.min_event_distance)) continue;
      }
      if (parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance > gt_alignment_get_read_proportion(alignment_src,parameters.max_event_distance)) continue;
      }
    }
    // Check levenshtein distance (same proportional min/max scheme)
    if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
      const uint64_t total_distance = gt_map_get_global_levenshtein_distance(map);
      if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance < gt_alignment_get_read_proportion(alignment_src,parameters.min_levenshtein_distance)) continue;
      }
      if (parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
        if (total_distance > gt_alignment_get_read_proportion(alignment_src,parameters.max_levenshtein_distance)) continue;
      }
    }
    // Filter strand
    if (parameters.filter_by_strand_se) {
      if (map->strand==FORWARD && !parameters.allow_strand_f) continue;
      if (map->strand==REVERSE && !parameters.allow_strand_r) continue;
    }
    // Filter quality scores (SAM carries phred, MAP carries the gt score; the
    // ternary argument continues on the next source line)
    if (parameters.quality_score_ranges!=NULL) {
      if (!gt_filter_is_quality_value_allowed((file_format==SAM) ?
map->phred_score : map->gt_score)) continue; } /* * (3) Reduction of all maps */ if (parameters.reduce_by_quality >= 0) { const int64_t q = gt_alignment_sum_mismatch_qualities(alignment_src,map); if (q!=0 && q!=max_mismatch_quality && abs(max_mismatch_quality-q)<=parameters.reduce_by_quality) continue; } /* * Insert the map */ gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates); // Skip the rest if first map is enabled if (parameters.first_map || pick_only_first_map) break; } /* * (4) Post-filtering steps */ if (parameters.keep_first_map) { if (gt_alignment_get_num_maps(alignment_dst)==0) { gt_alignment_insert_map(alignment_dst,first_map, parameters.check_duplicates); } else { gt_map_delete(first_map); } } } void gt_template_reduction_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) { GT_TEMPLATE_IF_SE_ALINGMENT(template_src) { GT_TEMPLATE_REDUCTION(template_src,alignment_src); GT_TEMPLATE_REDUCTION(template_dst,alignment_dst); gt_alignment_reduction_filter(alignment_dst,alignment_src,file_format); } else { if (!gt_template_is_mapped(template_src)) { if(!parameters.reduce_to_pairs){ GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2); GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2); gt_alignment_reduction_filter(alignment_dst_end1,alignment_src_end1,file_format); gt_alignment_reduction_filter(alignment_dst_end2,alignment_src_end2,file_format); } } else { GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) { if (parameters.reduce_to_unique_strata >= 0 && (gt_template_get_uniq_degree(template_src) >= parameters.reduce_to_unique_strata)) { gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks); gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates); free(mmap_copy); break; } if(gt_template_get_num_mmaps(template_src) >= parameters.reduce_to_unique) break; 
gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks); gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates); free(mmap_copy); } } } } void gt_template_dna_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) { /* * Filtering workflow * (1) Pre-filtering steps * (2) Filtering of maps (taking them into account individually) * (3) Reduction of all maps (taking them into account as a whole) * (4) Post-filtering steps */ GT_TEMPLATE_IF_SE_ALINGMENT(template_src) { GT_TEMPLATE_REDUCTION(template_src,alignment_src); GT_TEMPLATE_REDUCTION(template_dst,alignment_dst); gt_alignment_dna_filter(alignment_dst,alignment_src,file_format); } else { if (!gt_template_is_mapped(template_src)) { GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2); GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2); gt_alignment_dna_filter(alignment_dst_end1,alignment_src_end1,file_format); gt_alignment_dna_filter(alignment_dst_end2,alignment_src_end2,file_format); } else { const uint64_t first_matching_distance = gt_counters_get_min_matching_strata(gt_template_get_counters_vector(template_src))-1; const uint64_t max_mismatch_quality = gt_template_get_max_mismatch_quality(template_src); // Reduction by unique level (can be calculated beforehand) bool pick_only_first_map = false; /* * (1) Pre-filtering steps */ gt_map** first_mmap = NULL; gt_mmap_attributes first_mmap_attributes = {0, 0, 0}; if (parameters.keep_first_map && gt_template_get_num_mmaps(template_src)>0) { gt_mmap* const mmap = gt_template_get_mmap(template_src,0); first_mmap = gt_mmap_array_copy(mmap->mmap,gt_template_get_num_blocks(template_src)); first_mmap_attributes = mmap->attributes; } /* * (2) Filtering of maps */ GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) { const int64_t current_stratum = parameters.no_penalty_for_splitmaps ? 
              gt_map_get_no_split_distance(mmap[0]) + gt_map_get_no_split_distance(mmap[1]):
              gt_map_get_global_distance(mmap[0]) + gt_map_get_global_distance(mmap[1]);
        // Stop scanning entirely (break, not continue) once past the allowed
        // strata window after the first matching stratum — NOTE(review): assumes
        // mmaps are iterated in increasing distance order; confirm upstream sorting.
        if (parameters.max_strata_after_map >= 0.0 && (current_stratum-first_matching_distance) > gt_template_get_read_proportion(template_src,parameters.max_strata_after_map)) break;
        // Check sequence name (both ends must map to an allowed sequence)
        if (parameters.map_ids!=NULL) {
          if (!gt_filter_is_sequence_name_allowed(mmap[0]->seq_name)) continue;
          if (!gt_filter_is_sequence_name_allowed(mmap[1]->seq_name)) continue;
        }
        // Check strata (event distance summed over both ends; proportional bounds)
        if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
          const int64_t total_distance = parameters.no_penalty_for_splitmaps ?
              gt_map_get_no_split_distance(mmap[0]) + gt_map_get_no_split_distance(mmap[1]):
              gt_map_get_global_distance(mmap[0]) + gt_map_get_global_distance(mmap[1]);
          if (parameters.min_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance < gt_template_get_read_proportion(template_src,parameters.min_event_distance)) continue;
          }
          if (parameters.max_event_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance > gt_template_get_read_proportion(template_src,parameters.max_event_distance)) continue;
          }
        }
        // Check levenshtein distance (summed over both ends)
        if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE || parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
          const int64_t total_distance = gt_map_get_global_levenshtein_distance(mmap[0])+gt_map_get_global_levenshtein_distance(mmap[1]);
          if (parameters.min_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance < gt_template_get_read_proportion(template_src,parameters.min_levenshtein_distance)) continue;
          }
          if (parameters.max_levenshtein_distance != GT_FILTER_FLOAT_NO_VALUE) {
            if (total_distance > gt_template_get_read_proportion(template_src,parameters.max_levenshtein_distance)) continue;
          }
        }
        // Check inss (insert-size bounds; only evaluated when some bound is set)
        if (parameters.min_inss > INT64_MIN || parameters.max_inss < INT64_MAX) {
          gt_status error_code;
          const int64_t inss = gt_template_get_insert_size(mmap,&error_code,0,0);
          // NOTE(review): error_code is never inspected — an invalid insert size
          // may still be range-compared; confirm gt_template_get_insert_size's
          // failure contract.
          if (parameters.min_inss > inss || inss > parameters.max_inss) continue;
        }
        // Check strandness (single-end rules applied to each end individually)
        if (parameters.filter_by_strand_se) {
          if (!parameters.allow_strand_f && (mmap[0]->strand==FORWARD || mmap[1]->strand==FORWARD)) continue;
          if (!parameters.allow_strand_r && (mmap[0]->strand==REVERSE || mmap[1]->strand==REVERSE)) continue;
        }
        // Paired-end orientation rules (FR/RF/FF/RR combinations)
        if (parameters.filter_by_strand_pe) {
          if (mmap[0]->strand==FORWARD && mmap[1]->strand==REVERSE && !parameters.allow_strand_fr) continue;
          if (mmap[0]->strand==REVERSE && mmap[1]->strand==FORWARD && !parameters.allow_strand_rf) continue;
          if (mmap[0]->strand==FORWARD && mmap[1]->strand==FORWARD && !parameters.allow_strand_ff) continue;
          if (mmap[0]->strand==REVERSE && mmap[1]->strand==REVERSE && !parameters.allow_strand_rr) continue;
        }
        // Filter quality scores (ternary argument continues on the next source line)
        if (parameters.quality_score_ranges!=NULL) {
          if (!gt_filter_is_quality_value_allowed((file_format==SAM) ?
mmap_attributes->phred_score : mmap_attributes->gt_score)) continue; } /* * (3) Reduction of all maps */ if (parameters.reduce_by_quality >= 0) { const int64_t q = gt_alignment_sum_mismatch_qualities(gt_template_get_block(template_src,0), mmap[0]) + gt_alignment_sum_mismatch_qualities(gt_template_get_block(template_src,1), mmap[1]); if (q!=0 && q!=max_mismatch_quality && abs(max_mismatch_quality-q)<=parameters.reduce_by_quality) continue; } /* * Insert the map */ gt_map** mmap_copy = gt_mmap_array_copy(mmap,__mmap_num_blocks); gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates); free(mmap_copy); // Skip the rest if first map is enabled if (parameters.first_map || pick_only_first_map) break; } /* * (4) Post-filtering steps */ if (parameters.keep_first_map) { if (gt_template_get_num_mmaps(template_dst)==0) { gt_template_insert_mmap(template_dst,first_mmap,&first_mmap_attributes, parameters.check_duplicates); } free(first_mmap); } } } } void gt_alignment_rna_filter(gt_alignment* const alignment_dst,gt_alignment* const alignment_src,const gt_file_format file_format) { GT_ALIGNMENT_ITERATE(alignment_src,map) { // Check sequence name if (parameters.map_ids!=NULL) { if (!gt_filter_is_sequence_name_allowed(map->seq_name)) continue; } // Check SM contained const uint64_t num_blocks = gt_map_get_num_blocks(map); if (parameters.no_split_maps && num_blocks>1) continue; if (parameters.only_split_maps && num_blocks==1) continue; // Filter intron length if (parameters.min_intron_length > 0) { if (gt_map_get_num_blocks(map) > 1) { if(gt_map_get_min_intron_length(map) < parameters.min_intron_length){ continue; } } } // Filter block length if (parameters.min_block_length > 0) { if (gt_map_get_num_blocks(map) > 1) { if (gt_map_get_min_block_length(map) < parameters.min_block_length) continue; } } // Insert the map gt_alignment_insert_map(alignment_dst,gt_map_copy(map), parameters.check_duplicates); // Skip the rest if best if 
(parameters.first_map) return; } } void gt_template_rna_filter(gt_template* const template_dst,gt_template* const template_src,const gt_file_format file_format) { GT_TEMPLATE_IF_SE_ALINGMENT(template_src) { GT_TEMPLATE_REDUCTION(template_src,alignment_src); GT_TEMPLATE_REDUCTION(template_dst,alignment_dst); /* * SE */ gt_alignment_rna_filter(alignment_dst,alignment_src,file_format); } else { /* * PE */ if (!gt_template_is_mapped(template_src)) { GT_TEMPLATE_REDUCE_BOTH_ENDS(template_src,alignment_src_end1,alignment_src_end2); GT_TEMPLATE_REDUCE_BOTH_ENDS(template_dst,alignment_dst_end1,alignment_dst_end2); gt_alignment_rna_filter(alignment_dst_end1,alignment_src_end1,file_format); gt_alignment_rna_filter(alignment_dst_end2,alignment_src_end2,file_format); } else { const uint64_t num_blocks = gt_template_get_num_blocks(template_src); GT_TEMPLATE_ITERATE_MMAP__ATTR(template_src,mmap,mmap_attributes) { // Check SM contained and get minimum intron length uint64_t has_sm = false; uint64_t min_intron_length = UINT64_MAX, min_block_length = UINT64_MAX; if (parameters.no_split_maps || parameters.only_split_maps || parameters.min_intron_length >= 0) { GT_MMAP_ITERATE(mmap,map,end_p) { if (gt_map_get_num_blocks(map) > 1) { const uint64_t mil = gt_map_get_min_intron_length(map); const uint64_t mbl = gt_map_get_min_block_length(map); has_sm = true; if (mil >= 0 && mil < min_intron_length) min_intron_length = mil; if (mbl >= 0 && mbl < min_block_length) min_block_length = mbl; } } } if (parameters.no_split_maps && has_sm) continue; if (parameters.only_split_maps && !has_sm) continue; // Filter intron length if (parameters.min_intron_length > 0 && min_intron_length != UINT64_MAX){ if(min_intron_length < parameters.min_intron_length){ continue; } } // Filter block length if (parameters.min_block_length > 0 && min_block_length != UINT64_MAX){ if(min_block_length < parameters.min_block_length) continue; } // Add the mmap gt_map** mmap_copy = gt_mmap_array_copy(mmap,num_blocks); 
gt_template_insert_mmap(template_dst,mmap_copy,mmap_attributes, parameters.check_duplicates); free(mmap_copy); // Skip the rest if best if (parameters.first_map) return; } } } } GT_INLINE bool gt_filter_apply_filters( const gt_file_format file_format,const uint64_t line_no, gt_sequence_archive* const sequence_archive,gt_template* const template) { /* * Recalculate counters without penalty for splitmaps */ if (parameters.no_penalty_for_splitmaps) { gt_template_recalculate_counters_no_splits(template); gt_template_sort_by_distance__score_no_split(template); } /* * Process Read/Qualities // TODO: move out of filter (this is processing) */ const uint64_t has_qualities = gt_template_has_qualities(template); if (parameters.remove_qualities && has_qualities) { GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { gt_string_clear(alignment->qualities); } } else if (parameters.add_qualities && !has_qualities) { GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { const uint64_t read_length = gt_alignment_get_read_length(alignment); gt_string_resize(alignment->qualities,read_length+1); gt_string_set_length(alignment->qualities,read_length); GT_STRING_ITERATE(alignment->qualities,buffer,i) { buffer[i]='~'; } } } if (parameters.uniform_read) { if (parameters.uniform_read_strict) { GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { gt_dna_read_uniform_strict_content(alignment->read,alignment->qualities); } } else { GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { gt_dna_read_uniform_content(alignment->read,alignment->qualities); } } } if (has_qualities) { if (parameters.qualities_to_offset_33) { GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { gt_qualities_adapt_from_offset64_to_offset33(alignment->qualities); } } if (parameters.qualities_to_offset_64) { GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { gt_qualities_adapt_from_offset33_to_offset64(alignment->qualities); } } } /* * Template/Alignment Filter */ // Consider mapped/unmapped const bool is_mapped = 
      gt_template_is_mapped(template);
  // Keep/drop whole templates by mapped status
  if (parameters.mapped && !is_mapped) return false;
  if (parameters.unmapped && is_mapped) return false;
  // Unique based filtering: drop templates whose uniqueness degree is below the
  // requested level (only meaningful for mapped templates)
  if (parameters.unique_level>=0.0 && is_mapped) {
    if (parameters.unique_level > gt_template_get_uniq_degree(template)) return false;
  }
  // Filter by read length (min/max are proportions of the read length)
  if (parameters.min_length>=0.0 || parameters.max_length>=0.0) {
    GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) {
      const uint64_t read_length = gt_alignment_get_read_length(alignment);
      if (parameters.min_length>=0.0) {
        const uint64_t min_length = gt_alignment_get_read_proportion(alignment,parameters.min_length);
        if (read_length < min_length) return false;
      }
      if (parameters.max_length>=0.0) {
        const uint64_t max_length = gt_alignment_get_read_proportion(alignment,parameters.max_length);
        if (read_length > max_length) return false;
      }
    }
  }
  // Filter by number of maps
  if (parameters.min_maps>=0 || parameters.max_maps>=0) {
    const uint64_t num_maps = gt_template_get_num_mmaps(template);
    if (parameters.min_maps>=0 && num_maps<parameters.min_maps) return false;
    if (parameters.max_maps>=0 && num_maps>parameters.max_maps) return false;
  }
  /*
   * MAP Filter
   */
  // Trim (hard-trim or restore a previous trim; counters must be refreshed after)
  if (parameters.hard_trim) {
    gt_template_hard_trim(template,parameters.left_trim,parameters.right_trim);
    gt_template_recalculate_counters(template);
  } else if (parameters.restore_trim) {
    gt_template_restore_trim(template);
    gt_template_recalculate_counters(template);
  }
  // (Re)Align (mutually exclusive strategies, checked in priority order)
  if (parameters.realign_levenshtein) {
    gt_template_realign_levenshtein(template,sequence_archive);
  } else if (parameters.realign_hamming) {
    gt_template_realign_hamming(template,sequence_archive);
  } else if (parameters.mismatch_recovery) {
    gt_filter_mismatch_recovery_maps(parameters.name_input_file,line_no,template,sequence_archive);
  }
  // check the split-map pairs for all paired alignments and
  // remove mapping pairs where the split are not coherent
  if(gt_template_get_num_blocks(template) == 2 && gt_template_is_mapped(template)){
gt_template *template_filtered = gt_template_dup(template,false,false); const uint64_t num_blocks = gt_template_get_num_blocks(template); GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,mmap,mmap_attributes) { if (!gt_filter_are_overlapping_pairs_coherent(mmap))continue; gt_map** mmap_copy = gt_mmap_array_copy(mmap,num_blocks); gt_template_insert_mmap(template_filtered,mmap_copy,mmap_attributes, parameters.check_duplicates); free(mmap_copy); } gt_template_swap(template,template_filtered); gt_template_delete(template_filtered); if (parameters.no_penalty_for_splitmaps) { gt_template_recalculate_counters_no_splits(template); gt_template_sort_by_distance__score_no_split(template); }else{ gt_template_recalculate_counters(template); } } // Map DNA-filtering uint64_t num_maps = gt_filter_get_num_maps(template); if (parameters.perform_dna_map_filter && (!parameters.keep_unique || num_maps > 1)) { gt_template *template_filtered = gt_template_dup(template,false,false); gt_template_dna_filter(template_filtered,template,file_format); // if keep_unique is on, we only flip if we have at least one // alignment left if(!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0){ gt_template_swap(template,template_filtered); } gt_template_delete(template_filtered); if (parameters.no_penalty_for_splitmaps) { gt_template_recalculate_counters_no_splits(template); gt_template_sort_by_distance__score_no_split(template); }else{ gt_template_recalculate_counters(template); } } // Map RNA-filtering num_maps = gt_filter_get_num_maps(template); if (parameters.perform_rna_map_filter && (!parameters.keep_unique || num_maps > 1)) { gt_template *template_filtered = gt_template_dup(template,false,false); gt_template_rna_filter(template_filtered,template,file_format); // if keep_unique is on, we only flip if we have at least one // alignment left if(!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0){ gt_template_swap(template,template_filtered); } // delete filtered 
and recalculate counters gt_template_delete(template_filtered); if (parameters.no_penalty_for_splitmaps) { gt_template_recalculate_counters_no_splits(template); gt_template_sort_by_distance__score_no_split(template); }else{ gt_template_recalculate_counters(template); } } // Map Annotation-filtering num_maps = gt_filter_get_num_maps(template); if (parameters.gtf != NULL && parameters.perform_annotation_filter && num_maps > 1) { gt_template *template_filtered = gt_template_dup(template,false,false); bool filtered = gt_filter_make_reduce_by_annotation(template_filtered,template); if(filtered && (!parameters.keep_unique || gt_filter_get_num_maps(template_filtered) > 0)){ gt_template_swap(template,template_filtered); } gt_template_delete(template_filtered); if (parameters.no_penalty_for_splitmaps) { gt_template_recalculate_counters_no_splits(template); gt_template_sort_by_distance__score_no_split(template); }else{ gt_template_recalculate_counters(template); } } // reduce by level filter num_maps = gt_filter_get_num_maps(template); if ((parameters.reduce_to_unique_strata >= 0 || parameters.reduce_to_unique != UINT64_MAX|| parameters.reduce_to_pairs) && (num_maps > 1)) { gt_template *template_filtered = gt_template_dup(template,false,false); gt_template_reduction_filter(template_filtered,template,file_format); gt_template_swap(template,template_filtered); gt_template_delete(template_filtered); if (parameters.no_penalty_for_splitmaps) { gt_template_recalculate_counters_no_splits(template); gt_template_sort_by_distance__score_no_split(template); }else{ gt_template_recalculate_counters(template); } } // Map pruning if (parameters.matches_pruning) gt_filter_prune_matches(template); // Make counters if (parameters.make_counters || parameters.no_penalty_for_splitmaps) { gt_template_recalculate_counters(template); } // Ok, go on return true; } GT_INLINE void gt_filter__print( const gt_file_format file_format,const uint64_t line_no, gt_sequence_archive* const 
sequence_archive,gt_template* const template, uint64_t* const total_algs_checked,uint64_t* const total_algs_correct, uint64_t* const total_maps_checked,uint64_t* const total_maps_correct, gt_buffered_output_file* const buffered_output,gt_generic_printer_attributes* const generic_printer_attributes, gt_buffered_output_file* const buffered_discarded_output,gt_generic_printer_attributes* const discarded_output_attributes) { bool discaded = false; /* * Apply Filters */ if (!gt_filter_apply_filters(file_format,line_no,sequence_archive,template)) discaded = true; if (parameters.uniform_read) { // Check zero-length reads GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { if (gt_alignment_get_read_length(alignment)==0) return; } } /* * Check */ if (!discaded && parameters.check) { if (!gt_filter_check_maps(parameters.name_input_file,line_no, template,sequence_archive,total_algs_checked,total_algs_correct,total_maps_checked,total_maps_correct)) discaded = true; } /* * Print template */ if (!parameters.no_output && !discaded) { if (gt_output_generic_bofprint_template(buffered_output,template,generic_printer_attributes)) { gt_error_msg("Fatal error outputting read '"PRIgts"'(InputLine:%"PRIu64")\n", PRIgts_content(gt_template_get_string_tag(template)),line_no); } } else if (discaded && buffered_discarded_output!=NULL) { if (gt_output_generic_bofprint_template(buffered_discarded_output,template,discarded_output_attributes)) { gt_error_msg("Fatal error outputting read '"PRIgts"'(InputLine:%"PRIu64")\n", PRIgts_content(gt_template_get_string_tag(template)),line_no); } } } /* * Special funcionality */ GT_INLINE void gt_filter_sample_read_print_fastq( gt_buffered_output_file* const buffered_output,gt_string* const tag,gt_string* const read,gt_string* const qualities, const bool print_segmented_read_info,const uint64_t segment_id,const uint64_t total_segments, const uint64_t left_trim,const uint64_t right_trim,const uint64_t chunk_size) { 
gt_bofprintf(buffered_output,"@"PRIgts,PRIgts_content(tag)); if (print_segmented_read_info) gt_output_bofprint_segmented_read_info(buffered_output,segment_id,total_segments); // Segmented Read if (left_trim > 0) { gt_bofprintf(buffered_output," lt:Z:%"PRIu64":"PRIgts":"PRIgts,left_trim, PRIgts_range_content(read,0,left_trim), PRIgts_range_content(qualities,0,left_trim)); // Left-trim } if (right_trim > 0) { gt_bofprintf(buffered_output," rt:Z:%"PRIu64":"PRIgts":"PRIgts,right_trim, PRIgts_range_content(read,left_trim+chunk_size,right_trim), PRIgts_range_content(qualities,left_trim+chunk_size,right_trim)); // Right-trim } // Print READ + QUALITIES (trimmed) gt_bofprintf(buffered_output,"\n"PRIgts"\n+\n"PRIgts"\n", PRIgts_trimmed_content(read,left_trim,right_trim), PRIgts_trimmed_content(qualities,left_trim,right_trim)); } GT_INLINE void gt_filter_sample_read_print_fasta( gt_buffered_output_file* const buffered_output,gt_string* const tag,gt_string* const read, const bool print_segmented_read_info,const uint64_t segment_id,const uint64_t total_segments, const uint64_t left_trim,const uint64_t right_trim,const uint64_t chunk_size) { gt_bofprintf(buffered_output,">"PRIgts,PRIgts_content(tag)); if (print_segmented_read_info) gt_output_bofprint_segmented_read_info(buffered_output,segment_id,total_segments); // Segmented Read if (left_trim > 0) { gt_bofprintf(buffered_output," lt:Z:%"PRIu64":"PRIgts,left_trim, PRIgts_range_content(read,0,left_trim)); // Left-trim } if (right_trim > 0) { gt_bofprintf(buffered_output," rt:Z:%"PRIu64":"PRIgts,right_trim, PRIgts_range_content(read,left_trim+chunk_size,right_trim)); // Right-trim } // Print READ (trimmed) gt_bofprintf(buffered_output,"\n"PRIgts"\n", PRIgts_trimmed_content(read,left_trim,right_trim)); } GT_INLINE void gt_filter_group_reads() { // Open file IN/OUT gt_input_file* input_file = (parameters.name_input_file==NULL) ? 
gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input); gt_output_file* output_file = (parameters.name_output_file==NULL) ? gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE); // Prepare out-printers if (parameters.output_format==FILE_FORMAT_UNKNOWN) parameters.output_format = input_file->file_format; // Select output format gt_generic_printer_attributes* const generic_printer_attributes = gt_generic_printer_attributes_new(parameters.output_format); // SegmentedRead aux variables gt_template* const group_template = gt_template_new(); uint64_t total_segments = 0, last_segment_id = 0; GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) { // Get group attribute gt_segmented_read_info* const segmented_read_info = gt_attributes_get_segmented_read_info(template->attributes); if (segmented_read_info==NULL) { gt_filter_cond_fatal_error_msg(total_segments!=last_segment_id, "Expected SegmentedRead Info => lastRead(%"PRIu64"/%"PRIu64")",last_segment_id,total_segments); gt_template_restore_trim(template); // If any GT_TEMPLATE_ITERATE_ALIGNMENT(group_template,alignment) { gt_attributes_remove(alignment->attributes,GT_ATTR_ID_SEGMENTED_READ_INFO); // If any } gt_output_generic_bofprint_template(buffered_output,template,generic_printer_attributes); // Print it, as it is } else { // First, undo the trim gt_template_restore_trim(template); // Tackle the group merging if (last_segment_id==total_segments) { /* * New group */ gt_filter_cond_fatal_error_msg(segmented_read_info->total_segments==0 || segmented_read_info->segment_id!=1, "Wrong SegmentedRead Info (Zero reads in group or not properly sorted)"); gt_template_clear(group_template,true); gt_template_copy(group_template,template,true,true); total_segments = segmented_read_info->total_segments; last_segment_id = segmented_read_info->segment_id; } else if 
(segmented_read_info->segment_id==last_segment_id+1 && segmented_read_info->segment_id <= total_segments) { /* * Old group (Keep merging) */ gt_filter_cond_fatal_error_msg(!gt_string_equals(template->tag,group_template->tag), "Wrong TAG in Segmented Reads Sequence ('"PRIgts"'/'"PRIgts"')",PRIgts_content(group_template->tag),PRIgts_content(template->tag)); gt_template_merge_template_mmaps(group_template,template); last_segment_id = segmented_read_info->segment_id; if (last_segment_id==total_segments) { // Close group GT_TEMPLATE_ITERATE_ALIGNMENT(group_template,alignment) { gt_attributes_remove(alignment->attributes,GT_ATTR_ID_SEGMENTED_READ_INFO); // If any } gt_output_generic_bofprint_template(buffered_output,group_template,generic_printer_attributes); } } else { gt_filter_fatal_error_msg("Wrong SegmentedRead Info => Expected(%"PRIu64"/%"PRIu64")::Found(%"PRIu64"/%"PRIu64").", segmented_read_info->segment_id,segmented_read_info->total_segments,last_segment_id,total_segments); } } } GT_END_READING_WRITING_LOOP(input_file,output_file,template); // Check proper end of merging groups gt_filter_cond_fatal_error_msg(total_segments!=last_segment_id, "Expected SegmentedRead Info => lastRead(%"PRIu64"/%"PRIu64")",last_segment_id,total_segments); // Clean gt_template_delete(group_template); gt_generic_printer_attributes_delete(generic_printer_attributes); gt_input_file_close(input_file); gt_output_file_close(output_file); } GT_INLINE void gt_filter_sample_read() { // Open file IN/OUT gt_input_file* input_file = (parameters.name_input_file==NULL) ? gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input); gt_output_file* output_file = (parameters.name_output_file==NULL) ? 
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE); // Parallel I/O #ifdef HAVE_OPENMP #pragma omp parallel num_threads(parameters.num_threads) #endif { GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) { GT_TEMPLATE_ITERATE_ALIGNMENT(template,alignment) { // Calculate the chunks const uint64_t read_length = gt_alignment_get_read_length(alignment); const uint64_t split_chunk_size = gt_get_integer_proportion(parameters.split_chunk_size,read_length); const uint64_t split_min_remainder = gt_get_integer_proportion(parameters.split_min_remainder,read_length); // Check boundaries if (split_chunk_size >= read_length || split_chunk_size <= split_min_remainder) { if (gt_alignment_has_qualities(alignment)) { gt_filter_sample_read_print_fastq(buffered_output,alignment->tag,alignment->read,alignment->qualities,false,1,1,0,0,read_length); // FASTQ } else { gt_filter_sample_read_print_fasta(buffered_output,alignment->tag,alignment->read,false,1,1,0,0,read_length); // FASTA } continue; } uint64_t split_step_size = gt_get_integer_proportion(parameters.split_step_size,read_length); if (split_step_size==0) split_step_size=1; const uint64_t split_left_trim = gt_get_integer_proportion(parameters.split_left_trim,read_length); const uint64_t split_right_trim = gt_get_integer_proportion(parameters.split_right_trim,read_length); const uint64_t full_chunks = ((read_length-split_left_trim-split_right_trim-split_chunk_size)/split_step_size)+1; uint64_t total_chunks = full_chunks; uint64_t left_trim=split_left_trim, right_trim=read_length-split_left_trim-split_chunk_size; // Check last chunk (remainder) const uint64_t last_left_trim = left_trim+(split_step_size*full_chunks); const uint64_t remainder_chunk = read_length-split_right_trim-last_left_trim; bool print_remainder_chunk = false; if (remainder_chunk > 0 && split_min_remainder > 0 && remainder_chunk < split_chunk_size && 
remainder_chunk >= split_min_remainder) { print_remainder_chunk = true; ++total_chunks; } uint64_t i; for (i=0;i<full_chunks;++i,left_trim+=split_step_size,right_trim-=split_step_size) { if (gt_alignment_has_qualities(alignment)) { gt_filter_sample_read_print_fastq( buffered_output,alignment->tag,alignment->read,alignment->qualities,true, i+1,total_chunks,left_trim,right_trim,split_chunk_size); // FASTQ } else { gt_filter_sample_read_print_fasta( buffered_output,alignment->tag,alignment->read,true, i+1,total_chunks,left_trim,right_trim,split_chunk_size); // FASTA } } // Print last chunk (remainder) if (print_remainder_chunk) { if (gt_alignment_has_qualities(alignment)) { gt_filter_sample_read_print_fastq( buffered_output,alignment->tag,alignment->read,alignment->qualities,true, total_chunks,total_chunks,last_left_trim,split_right_trim,remainder_chunk); // FASTQ } else { gt_filter_sample_read_print_fasta( buffered_output,alignment->tag,alignment->read,true, total_chunks,total_chunks,last_left_trim,split_right_trim,remainder_chunk); // FASTA } } } } GT_END_READING_WRITING_LOOP(input_file,output_file,template); } // Clean gt_input_file_close(input_file); gt_output_file_close(output_file); } GT_INLINE void gt_filter_print_insert_size_distribution() { // Open file IN/OUT gt_input_file* input_file = (parameters.name_input_file==NULL) ? gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input); gt_output_file* output_file = (parameters.name_output_file==NULL) ? 
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O: each OpenMP thread runs its own buffered read/write loop
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads)
  #endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      // Print insert size (only meaningful for paired templates with 2 blocks)
      if (gt_template_get_num_blocks(template)!=2) continue;
      GT_TEMPLATE_ITERATE_(template,mmap) {
        gt_status error_code;
        // NOTE(review): error_code is an out-param of gt_template_get_insert_size but is
        // never checked here, so an invalid insert size may still be printed — confirm intent
        gt_bofprintf(buffered_output,"%"PRIu64"\n",gt_template_get_insert_size(mmap,&error_code,0,0));
        if (parameters.first_map) break; // Only report the primary map's insert size
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Prints one global Levenshtein distance per line: the best (minimum) distance
 * per template when --first-map is set, otherwise one line per map.
 */
GT_INLINE void gt_filter_print_error_distribution() {
  // Open file IN/OUT (stdin/stdout when no file name was given)
  gt_input_file* input_file = (parameters.name_input_file==NULL) ?
      gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input);
  gt_output_file* output_file = (parameters.name_output_file==NULL) ?
      gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
  // Parallel I/O
  #ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(parameters.num_threads)
  #endif
  {
    GT_BEGIN_READING_WRITING_LOOP(input_file,output_file,parameters.paired_end,buffered_output,template) {
      // Print levenshtein distance of the maps
      if (parameters.first_map) {
        // Report only the best (smallest) distance across all maps of this template
        uint64_t best_distance = UINT64_MAX;
        GT_TEMPLATE_ITERATE_(template,mmap) {
          const uint64_t dist = gt_map_get_global_levenshtein_distance(*mmap);
          if (dist < best_distance) best_distance = dist;
        }
        // UINT64_MAX sentinel means the template had no maps; print nothing then
        if (best_distance < UINT64_MAX) gt_bofprintf(buffered_output,"%"PRIu64"\n",best_distance);
      } else {
        // Report the distance of every map
        GT_TEMPLATE_ITERATE_(template,mmap) {
          gt_bofprintf(buffered_output,"%"PRIu64"\n",gt_map_get_global_levenshtein_distance(*mmap));
        }
      }
    } GT_END_READING_WRITING_LOOP(input_file,output_file,template);
  }
  // Clean
  gt_input_file_close(input_file);
  gt_output_file_close(output_file);
}
/*
 * Handler for opening an archive (GEMIndex/MULTIFastaFile).
 * Caller owns the returned archive. load_sequences controls whether the
 * GEM index's sequence payload is loaded (ignored on the MultiFASTA path,
 * which always parses the full reference).
 */
gt_sequence_archive* gt_filter_open_sequence_archive(const bool load_sequences) {
  gt_sequence_archive* sequence_archive = NULL;
  gt_log("Loading reference file ...");
  if (parameters.name_gem_index_file!=NULL) { // Load GEM-IDX
    sequence_archive = gt_sequence_archive_new(GT_BED_ARCHIVE);
    gt_gemIdx_load_archive(parameters.name_gem_index_file,sequence_archive,load_sequences);
  } else {
    // Fall back to parsing a MultiFASTA reference file
    gt_input_file* const reference_file = gt_input_file_open(parameters.name_reference_file,false);
    sequence_archive = gt_sequence_archive_new(GT_CDNA_ARCHIVE);
    if (gt_input_multifasta_parser_get_archive(reference_file,sequence_archive)!=GT_IFP_OK) {
      gt_fatal_error_msg("Error parsing reference file '%s'\n",parameters.name_reference_file);
    }
    gt_input_file_close(reference_file);
  }
  gt_log("Done.");
  return sequence_archive;
}
// Prints "<sequence_name>\t<length>" for every sequence in the reference archive
GT_INLINE void gt_filter_display_sequence_list(){
  // Show sequence archive summary
  gt_sequence_archive* sequence_archive = gt_filter_open_sequence_archive(false);
gt_sequence_archive_iterator sequence_archive_it; gt_sequence_archive_new_iterator(sequence_archive,&sequence_archive_it); gt_segmented_sequence* seq; while ((seq=gt_sequence_archive_iterator_next(&sequence_archive_it))) { fprintf(stdout,"%s\t%"PRIu64"\n",seq->seq_name->buffer,seq->sequence_total_length); } } /* * I/O Filtering Loop */ #define GT_FILTER_CHECK_PARSING_ERROR(FORMAT) \ ++record_num; \ if (error_code!=GT_IMP_OK) { \ gt_error_msg("[#%"PRIu64"]Fatal error parsing "FORMAT"file '%s', line %"PRIu64"\n", \ record_num,parameters.name_input_file,buffered_input->current_line_num-1); \ continue; \ } void gt_filter_read__write() { // Open file IN/OUT gt_input_file* input_file = (parameters.name_input_file==NULL) ? gt_input_stream_open(stdin) : gt_input_file_open(parameters.name_input_file,parameters.mmap_input); gt_output_file* output_file, *dicarded_output_file; // Open out file if (!parameters.no_output) { output_file = (parameters.name_output_file==NULL) ? gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE); if (parameters.discarded_output) { if (gt_streq(parameters.name_discarded_output_file,"stdout")) { dicarded_output_file = gt_output_stream_new(stdout,SORTED_FILE); } else if (gt_streq(parameters.name_discarded_output_file,"stderr")) { dicarded_output_file = gt_output_stream_new(stderr,SORTED_FILE); } else { dicarded_output_file = gt_output_file_new(parameters.name_discarded_output_file,SORTED_FILE); } } } // Open reference file gt_sequence_archive* sequence_archive = NULL; if (parameters.load_index) { sequence_archive = gt_filter_open_sequence_archive(true); } // read annotaiton if specified if (parameters.annotation != NULL && parameters.perform_annotation_filter) { parameters.gtf = gt_gtf_read_from_file(parameters.annotation, parameters.num_threads); } // Parallel reading+process uint64_t total_algs_checked=0, total_algs_correct=0, total_maps_checked=0, total_maps_correct=0; #ifdef HAVE_OPENMP #pragma 
omp parallel num_threads(parameters.num_threads) reduction(+:total_algs_checked,total_algs_correct,total_maps_checked,total_maps_correct) #endif { // Prepare IN/OUT buffers & printers gt_status error_code; gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file); gt_buffered_output_file *buffered_output = NULL, *buffered_discarded_output = NULL; if (!parameters.no_output) { buffered_output = gt_buffered_output_file_new(output_file); gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_output); if (parameters.discarded_output) { buffered_discarded_output = gt_buffered_output_file_new(dicarded_output_file); gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_discarded_output); } } // Prepare IN/OUT parser/printer attributes gt_generic_printer_attributes *generic_printer_attributes=NULL, *discarded_output_attributes=NULL; if (parameters.output_format==FILE_FORMAT_UNKNOWN) parameters.output_format = input_file->file_format; // Select output format generic_printer_attributes = gt_generic_printer_attributes_new(parameters.output_format); if (parameters.discarded_output) { gt_file_format output_format = input_file->file_format; if (parameters.discarded_output_format!=FILE_FORMAT_UNKNOWN) output_format=parameters.discarded_output_format; discarded_output_attributes = gt_generic_printer_attributes_new(output_format); } /* * READ + PROCCESS Loop */ uint64_t record_num = 0; gt_template* template = gt_template_new(); if (parameters.check_format && parameters.check_file_format==FASTA) { /* * FASTA I/O loop */ while ((error_code=gt_input_fasta_parser_get_template(buffered_input,template,parameters.paired_end))) { GT_FILTER_CHECK_PARSING_ERROR("FASTA "); // Apply all filters and print gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template, &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct, 
buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes); } } else if (parameters.check_format && parameters.check_file_format==MAP) { /* * MAP I/O loop */ gt_map_parser_attributes* const attr = gt_input_map_parser_attributes_new(parameters.paired_end); while ((error_code=gt_input_map_parser_get_template(buffered_input,template,attr))) { GT_FILTER_CHECK_PARSING_ERROR("MAP "); // Apply all filters and print gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template, &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct, buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes); } gt_input_map_parser_attributes_delete(attr); } else if (parameters.check_format && parameters.check_file_format==SAM) { /* * SAM I/O loop */ gt_sam_parser_attributes* const attr = gt_input_sam_parser_attributes_new(); while ((error_code=gt_input_sam_parser_get_template(buffered_input,template,attr))) { GT_FILTER_CHECK_PARSING_ERROR("SAM "); // Apply all filters and print gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template, &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct, buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes); } gt_input_sam_parser_attributes_delete(attr); } else { /* * Generic I/O loop */ gt_generic_parser_attributes* generic_parser_attributes = gt_input_generic_parser_attributes_new(parameters.paired_end); gt_input_map_parser_attributes_set_max_parsed_maps(generic_parser_attributes->map_parser_attributes,parameters.max_input_matches); // Limit max-matches while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,generic_parser_attributes))) { GT_FILTER_CHECK_PARSING_ERROR(""); // Apply all filters and print 
gt_filter__print(input_file->file_format,buffered_input->current_line_num-1,sequence_archive,template, &total_algs_checked,&total_algs_correct,&total_maps_checked,&total_maps_correct, buffered_output,generic_printer_attributes,buffered_discarded_output,discarded_output_attributes); } gt_input_generic_parser_attributes_delete(generic_parser_attributes); } // Clean gt_template_delete(template); gt_buffered_input_file_close(buffered_input); gt_generic_printer_attributes_delete(generic_printer_attributes); if (!parameters.no_output) { gt_buffered_output_file_close(buffered_output); if (parameters.discarded_output) gt_buffered_output_file_close(buffered_discarded_output); } } /* * Print check report */ if (parameters.check) { gt_log("Checked %lu alignments. Total.Correct %lu (%2.3f %%). Total.Maps.Correct %lu (%2.3f %%)", total_algs_checked,total_algs_correct,GT_GET_PERCENTAGE(total_algs_correct,total_algs_checked), total_maps_correct,GT_GET_PERCENTAGE(total_maps_correct,total_maps_checked)); } // Release archive & Clean if (sequence_archive) gt_sequence_archive_delete(sequence_archive); gt_filter_delete_map_ids(parameters.map_ids); if (parameters.quality_score_ranges!=NULL) gt_vector_delete(parameters.quality_score_ranges); gt_input_file_close(input_file); if (!parameters.no_output) { gt_output_file_close(output_file); if (parameters.discarded_output) gt_output_file_close(dicarded_output_file); } } /* * Argument Parsing */ void gt_filter_get_coma_separated_arguments_long(char* const parameters_list,const uint64_t num_params,...) 
{ uint64_t num_params_parsed = 0; // Start va_args va_list v_args; va_start(v_args,num_params); // Start parsing char *opt = strtok(parameters_list,","); while (opt!=NULL && num_params_parsed<num_params) { uint64_t* const uint64_arg = va_arg(v_args,uint64_t*); *uint64_arg = atoll(opt); opt = strtok(NULL,","); } // End va_args va_end(v_args); } GT_INLINE uint64_t gt_filter_get_coma_separated_arguments_float(char* const parameters_list,const uint64_t num_params,...) { uint64_t num_params_parsed = 0; // Start va_args va_list v_args; va_start(v_args,num_params); // Start parsing char *opt = strtok(parameters_list,","); while (opt!=NULL && num_params_parsed<num_params) { float* const float_arg = va_arg(v_args,float*); *float_arg = atof(opt); opt = strtok(NULL,","); ++num_params_parsed; } // End va_args va_end(v_args); return num_params_parsed; } void gt_filter_get_discarded_output_arguments(char* const optarg) { // Start parsing char *opt = strtok(optarg,","); parameters.name_discarded_output_file = opt; opt = strtok(NULL,","); // Next if (opt!=NULL) { if (gt_streq(opt,"FASTA")) { parameters.discarded_output_format = FASTA; } else if (gt_streq(opt,"MAP")) { parameters.discarded_output_format = MAP; } else if (gt_streq(opt,"SAM")) { parameters.discarded_output_format = SAM; } else { gt_fatal_error_msg("Output format '%s' not recognized",opt); } } } void gt_filter_get_argument_pair_strandness(char* const strandness_opt) { char *opt; opt = strtok(strandness_opt,","); while (opt!=NULL) { if (gt_streq(opt,"FR")) { parameters.allow_strand_fr = true; } else if (gt_streq(opt,"RF")) { parameters.allow_strand_rf = true; } else if (gt_streq(opt,"FF")) { parameters.allow_strand_ff = true; } else if (gt_streq(opt,"RR")) { parameters.allow_strand_rr = true; } else { gt_fatal_error_msg("Strandedness option not recognized '%s'\n",opt); } opt = strtok(NULL,","); // Reload } parameters.filter_by_strand_pe = true; } void gt_filter_get_argument_map_id(char* const maps_ids) { // Allocate 
vector parameters.map_ids = gt_vector_new(20,sizeof(gt_string*)); // Add all the valid map Ids (sequence names) char *opt; opt = strtok(maps_ids,","); while (opt!=NULL) { // Get id gt_string* map_id = gt_string_new(0); gt_string_set_string(map_id,opt); // Add to the vector gt_vector_insert(parameters.map_ids,map_id,gt_string*); // Next opt = strtok(NULL,","); // Reload } } void gt_filter_get_argument_gtf_type(char* const maps_ids) { // Allocate vector parameters.gtf_types = gt_shash_new(); // Add all the valid map Ids (sequence names) char *opt; opt = strtok(maps_ids,","); while (opt!=NULL) { // Get id gt_shash_insert(parameters.gtf_types, opt, true, bool); // Next opt = strtok(NULL,","); // Reload } } void parse_arguments(int argc,char** argv) { struct option* gt_filter_getopt = gt_options_adaptor_getopt(gt_filter_options); gt_string* const gt_filter_short_getopt = gt_options_adaptor_getopt_short(gt_filter_options); int option, option_index; while (true) { // Get option & Select case if ((option=getopt_long(argc,argv, gt_string_get_string(gt_filter_short_getopt),gt_filter_getopt,&option_index))==-1) break; switch (option) { /* I/O */ case 'i': parameters.name_input_file = optarg; break; case 'o': parameters.name_output_file = optarg; if (gt_streq(optarg,"null")) parameters.no_output = true; break; case 'r': parameters.name_reference_file = optarg; break; case 'I': parameters.name_gem_index_file = optarg; break; case 200: // annotation parameters.annotation = optarg; break; case 201: parameters.mmap_input = true; break; case 'p': parameters.paired_end = true; break; case 202: // output-format if (gt_streq(optarg,"FASTA")) { parameters.output_format = FASTA; } else if (gt_streq(optarg,"MAP")) { parameters.output_format = MAP; } else if (gt_streq(optarg,"SAM")) { parameters.output_format = SAM; } else { gt_fatal_error_msg("Output format '%s' not recognized",optarg); } break; case 203: // discarded-output parameters.discarded_output = true; 
gt_filter_get_discarded_output_arguments(optarg); break; case 204: // no-output parameters.no_output = true; break; case 205: // check-duplicates parameters.check_duplicates = true; break; /* Filter Read/Qualities */ case 300: // hard-trim parameters.hard_trim = true; gt_filter_get_coma_separated_arguments_long(optarg,2,&(parameters.left_trim),&(parameters.right_trim)); break; case 301: // quality-trim gt_fatal_error(NOT_IMPLEMENTED); break; case 302: // restore-trim parameters.restore_trim = true; break; case 303: // uniform-read parameters.uniform_read = true; if (optarg && gt_streq(optarg,"strict")) parameters.uniform_read_strict = true; break; case 304: // qualities-to-offset-33 parameters.qualities_to_offset_33 = true; break; case 305: // qualities-to-offset-64 parameters.qualities_to_offset_64 = true; break; case 306: // remove-qualities parameters.remove_qualities = true; break; case 307: // add-qualities parameters.add_qualities = true; break; /* Filter Template/Alignments */ case 400: parameters.mapped = true; break; case 401: parameters.unmapped = true; break; case 402: parameters.unique_level = atoll(optarg); break; case 403: parameters.min_length = atof(optarg); break; case 404: parameters.max_length = atof(optarg); break; case 405: parameters.min_maps = atof(optarg); break; case 406: parameters.max_maps = atof(optarg); break; /* Filter Maps */ case 500: // first-map parameters.perform_dna_map_filter = true; parameters.first_map = true; break; case 'k': // keep-first-map parameters.keep_first_map = true; break; case 'u': // keep-unique parameters.keep_unique = true; break; case 'd': // max-decoded-matches parameters.matches_pruning = true; parameters.max_decoded_matches = atoll(optarg); break; case 'D': // min-decoded-strata parameters.matches_pruning = true; parameters.min_decoded_strata = atoll(optarg); break; case 501: // max-output-matches parameters.matches_pruning = true; parameters.max_output_matches = atoll(optarg); break; case 502: // 
max-input-matches parameters.max_input_matches = atoll(optarg); break; case 503: // max-strata-after-map parameters.perform_dna_map_filter = true; parameters.max_strata_after_map = atof(optarg); break; case 504: // make-counters parameters.make_counters = true; break; case 505: // min-strata parameters.perform_dna_map_filter = true; parameters.min_event_distance = atof(optarg); break; case 506: // max-strata parameters.perform_dna_map_filter = true; parameters.max_event_distance = atof(optarg); break; case 507: // min-levenshtein-error parameters.perform_dna_map_filter = true; parameters.min_levenshtein_distance = atof(optarg); break; case 508: // max-levenshtein-error parameters.perform_dna_map_filter = true; parameters.max_levenshtein_distance = atof(optarg); break; case 509: // map-id parameters.perform_dna_map_filter = true; gt_filter_get_argument_map_id(optarg); break; case 510: // strandedness parameters.perform_dna_map_filter = true; parameters.filter_by_strand_se = true; if (gt_streq(optarg,"F")) { parameters.allow_strand_f = true; } else if (gt_streq(optarg,"R")) { parameters.allow_strand_r = true; } else { gt_fatal_error_msg("Strand '%s' not recognized {'F','R'}",optarg); } break; case 511: // filter-quality parameters.perform_dna_map_filter = true; gt_filter_quality_range qrange; gt_filter_get_coma_separated_arguments_long(optarg,2,&(qrange.min),&(qrange.max)); // Add it to the vector of ranges if (parameters.quality_score_ranges==NULL) { parameters.quality_score_ranges = gt_vector_new(4,sizeof(gt_filter_quality_range)); } gt_vector_insert(parameters.quality_score_ranges,qrange,gt_filter_quality_range); break; case 512: // reduce-to-level parameters.perform_dna_map_filter = true; parameters.reduce_to_unique_strata = atol(optarg); break; case 513: // reduce-by-quality parameters.perform_dna_map_filter = true; parameters.reduce_by_quality = atol(optarg); break; case 514: // reduce-by-annotation parameters.reduce_by_gene_id = true; 
parameters.perform_annotation_filter = true; break; case 515: // reduce-to-unique parameters.reduce_to_unique = atol(optarg); break; case 516: // reduce-to-pairs parameters.reduce_to_pairs = true; break; case 517: // reduce-to-protein-coding parameters.reduce_to_protein_coding = true; parameters.perform_annotation_filter = true; break; case 518: // reduce-by_junctions parameters.reduce_by_junctions = true; parameters.perform_annotation_filter = true; break; /* Filter RNA-Maps */ case 600: // no-split-maps parameters.no_split_maps = true; parameters.perform_rna_map_filter = true; break; case 601: // only-split-maps parameters.only_split_maps = true; parameters.perform_rna_map_filter = true; break; case 's': // no-penalty-for-splitmaps parameters.no_penalty_for_splitmaps = true; break; case 603: // min-intron-length parameters.min_intron_length = atol(optarg); parameters.perform_rna_map_filter = true; break; case 604: // min-block-length parameters.min_block_length = atol(optarg); parameters.perform_rna_map_filter = true; break; /* Filter PE-Maps */ case 700: // pair-strandness parameters.perform_dna_map_filter = true; gt_filter_get_argument_pair_strandness(optarg); break; case 701: // min-inss parameters.perform_dna_map_filter = true; parameters.min_inss = atoll(optarg); break; case 702: // max-inss parameters.perform_dna_map_filter = true; parameters.max_inss = atoll(optarg); break; /* Realign/Check */ case 800: // mismatch-recovery parameters.load_index = true; parameters.mismatch_recovery = true; break; case 801: // hamming-realign parameters.load_index = true; parameters.realign_hamming = true; break; case 802: // levenshtein-realign parameters.load_index = true; parameters.realign_levenshtein = true; break; /* Checking/Report */ case 'c': // check parameters.load_index = true; parameters.check = true; break; case 'C': // check-only parameters.load_index = true; parameters.check = true; parameters.no_output = true; break; case 803: // check-format 
parameters.check_format = true; if (gt_streq(optarg,"FASTA")) { parameters.check_file_format = FASTA; } else if (gt_streq(optarg,"MAP")) { parameters.check_file_format = MAP; } else if (gt_streq(optarg,"SAM")) { parameters.check_file_format = SAM; } else { gt_fatal_error_msg("Check format '%s' not recognized",optarg); } break; /* Split/Grouping */ case 900: // split-read gt_fatal_error(NOT_IMPLEMENTED); break; case 901: // sample-read parameters.special_functionality = true; parameters.sample_read = true; gt_cond_fatal_error_msg(gt_filter_get_coma_separated_arguments_float(optarg,5, &(parameters.split_chunk_size),&(parameters.split_step_size), &(parameters.split_left_trim),&(parameters.split_right_trim), &(parameters.split_min_remainder))<4, "Too few parameters provided to option --split-read"); break; case 902: // group-read-chunks parameters.special_functionality = true; parameters.group_reads = true; break; /* Display/Information */ case 1000: parameters.special_functionality = true; parameters.error_plot = true; break; case 1001: parameters.special_functionality = true; parameters.insert_size_plot = true; break; case 1002: parameters.special_functionality = true; parameters.load_index = true; parameters.show_sequence_list = true; break; case 1003: parameters.special_functionality = true; parameters.load_index = true; parameters.display_pretty = true; break; /* Misc */ case 't': // threads #ifdef HAVE_OPENMP parameters.num_threads = atol(optarg); #endif gt_cond_fatal_error_msg(parameters.num_threads > GT_MAX_OUTPUT_BUFFERS, "Excessive number of threads (maximum %"PRId32")",GT_MAX_OUTPUT_BUFFERS); break; case 'v': // verbose parameters.verbose = true; break; case 'h': // help fprintf(stderr, "USE: ./gt.filter [ARGS]...\n"); gt_options_fprint_menu(stderr,gt_filter_options,gt_filter_groups,false,false); exit(1); case 'H': // full-help fprintf(stderr, "USE: ./gt.filter [ARGS]...\n"); gt_options_fprint_menu(stderr,gt_filter_options,gt_filter_groups,false,true); 
exit(1); case 'J': gt_options_fprint_json_menu(stderr,gt_filter_options,gt_filter_groups,true,false); exit(1); break; case '?': default: gt_fatal_error_msg("Option not recognized"); } } /* * Parameters check */ if (parameters.load_index && parameters.name_reference_file==NULL && parameters.name_gem_index_file==NULL) { gt_fatal_error_msg("Reference file required"); } // Free gt_string_delete(gt_filter_short_getopt); } /* * Main */ int main(int argc,char** argv) { // GT error handler gt_handle_error_signals(); // Parsing command-line options parse_arguments(argc,argv); /* * Select functionality */ if (parameters.show_sequence_list) { gt_filter_display_sequence_list(); } else if (parameters.group_reads) { gt_filter_group_reads(); } else if (parameters.sample_read) { gt_filter_sample_read(); // Depreciated } else if (parameters.error_plot) { gt_filter_print_insert_size_distribution(); } else if (parameters.insert_size_plot) { gt_filter_print_error_distribution(); // Depreciated } else { gt_filter_read__write(); // Filter !! } return 0; }
LRD_precond.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <omp.h> #include "H2Pack.h" #include "LRD_precond.h" // Evaluate a kernel matrix with OpenMP parallelization extern void H2P_eval_kernel_matrix_OMP( const void *krnl_param, kernel_eval_fptr krnl_eval, const int krnl_dim, H2P_dense_mat_p x_coord, H2P_dense_mat_p y_coord, H2P_dense_mat_p kernel_mat ); // Construct a LRD_precond from a H2Pack structure using Nystrom method with random sampling void H2P_build_LRD_precond(H2Pack_p h2pack, const int rank, const DTYPE shift, LRD_precond_p *precond_) { LRD_precond_p precond = (LRD_precond_p) malloc(sizeof(LRD_precond_s)); assert(precond != NULL); int n_point = h2pack->n_point; int n_thread = h2pack->n_thread; int pt_dim = h2pack->pt_dim; int mat_size = h2pack->krnl_mat_size; int krnl_dim = h2pack->krnl_dim; int nrow = rank * krnl_dim; double st = get_wtime_sec(); int *flag = (int*) malloc(sizeof(int) * n_point); ASSERT_PRINTF(flag != NULL, "Failed to allocate work array of size %d for LRD preconditioner\n", n_point); memset(flag, 0, sizeof(int) * n_point); H2P_int_vec_p skel_idx; H2P_dense_mat_p coord_all, coord_skel; H2P_int_vec_init(&skel_idx, rank); H2P_dense_mat_init(&coord_all, pt_dim, n_point); H2P_dense_mat_init(&coord_skel, pt_dim, n_point); memcpy(coord_all->data, h2pack->coord, sizeof(DTYPE) * pt_dim * n_point); memcpy(coord_skel->data, h2pack->coord, sizeof(DTYPE) * pt_dim * n_point); // Not working? 
int cnt = 0; while (cnt < rank) { int idx = rand() % n_point; if (flag[idx] == 0) { flag[idx] = 1; skel_idx->data[cnt] = idx; cnt++; } } //for (int i = 0; i < rank; i++) skel_idx->data[i] = i * n_point / rank; skel_idx->length = rank; H2P_dense_mat_select_columns(coord_skel, skel_idx); int info; H2P_dense_mat_p L, Ut, tmp; H2P_dense_mat_init(&L, nrow, nrow); H2P_dense_mat_init(&Ut, nrow, mat_size); H2P_dense_mat_init(&tmp, nrow, mat_size); // L = kernel({coord(idx, :), coord(idx, :)}); // Ut = kernel({coord(idx, :), coord}); H2P_eval_kernel_matrix_OMP(h2pack->krnl_param, h2pack->krnl_eval, krnl_dim, coord_skel, coord_skel, L); H2P_eval_kernel_matrix_OMP(h2pack->krnl_param, h2pack->krnl_eval, krnl_dim, coord_skel, coord_all, Ut); // [V, D] = eig(S); // Ut = inv(D) * V' * Ut; info = LAPACK_SYEVD(LAPACK_ROW_MAJOR, 'V', 'L', nrow, L->data, L->ld, tmp->data); ASSERT_PRINTF(info == 0, "Eigen decomposition for S matrix failed and returned %d, matrix size = %d\n", info, nrow); DTYPE *V = L->data, *D = tmp->data; DTYPE max_diag = 0.0; for (int i = 0; i < nrow; i++) { if (D[i] < 0.0) WARNING_PRINTF("S matrix %d-th eigenvalue = %e < 0!\n", i+1, D[i]); if (D[i] > max_diag) max_diag = D[i]; } for (int i = 0; i < nrow; i++) D[i] = (D[i] >= 1e-10 * max_diag) ? 
D[i] = 1.0 / sqrt(D[i]) : 0.0; #pragma omp parallel for for (int i = 0; i < nrow; i++) { DTYPE *V_i = V + i * nrow; #pragma omp simd for (int j = 0; j < nrow; j++) V_i[j] *= D[j]; } CBLAS_GEMM( CblasRowMajor, CblasTrans, CblasNoTrans, nrow, mat_size, nrow, 1.0, V, nrow, Ut->data, mat_size, 0.0, tmp->data, mat_size ); #pragma omp parallel for for (int i = 0; i < nrow * mat_size; i++) Ut->data[i] = tmp->data[i]; // tmp = Ut * Ut' + shift * eye(lr_rank); H2P_dense_mat_resize(tmp, nrow, nrow); CBLAS_GEMM( CblasRowMajor, CblasNoTrans, CblasTrans, nrow, nrow, mat_size, 1.0, Ut->data, Ut->ld, Ut->data, Ut->ld, 0.0, tmp->data, tmp->ld ); for (int i = 0; i < nrow; i++) tmp->data[i * tmp->ld + i] += shift; // tmp = chol(tmp, 'lower'); // Ut = linsolve(tmp, Ut, struct('LT', true)); info = LAPACK_POTRF(LAPACK_ROW_MAJOR, 'L', nrow, tmp->data, tmp->ld); ASSERT_PRINTF(info == 0, "Cholesky decomposition failed and return %d, matrix size = %d\n", info, nrow); CBLAS_TRSM( CblasRowMajor, CblasLeft, CblasLower, CblasNoTrans, CblasNonUnit, nrow, mat_size, 1.0, tmp->data, tmp->ld, Ut->data, Ut->ld ); DTYPE *Ut_ = (DTYPE*) malloc(sizeof(DTYPE) * nrow * mat_size); DTYPE *workbuf = (DTYPE*) malloc(sizeof(DTYPE) * nrow); ASSERT_PRINTF( Ut_ != NULL && workbuf != NULL, "Failed to allocate matrix of size %d * %d for LRD preconditioner\n", nrow, mat_size+1 ); memcpy(Ut_, Ut->data, sizeof(DTYPE) * nrow * mat_size); H2P_dense_mat_destroy(&coord_all); H2P_dense_mat_destroy(&coord_skel); H2P_dense_mat_destroy(&L); H2P_dense_mat_destroy(&Ut); H2P_dense_mat_destroy(&tmp); H2P_int_vec_destroy(&skel_idx); free(coord_all); free(coord_skel); free(skel_idx); size_t pmt_idx_bytes = sizeof(int) * mat_size; size_t pmt_vec_bytes = sizeof(DTYPE) * mat_size; int *fwd_pmt = (int*) malloc(pmt_idx_bytes); int *bwd_pmt = (int*) malloc(pmt_idx_bytes); DTYPE *pmt_b = (DTYPE*) malloc(pmt_vec_bytes); DTYPE *pmt_x = (DTYPE*) malloc(pmt_vec_bytes); ASSERT_PRINTF( fwd_pmt != NULL && bwd_pmt != NULL && pmt_b != NULL && 
pmt_x != NULL, "Failed to allocate vector permutation arrays for FSAI preconditioner\n" ); memcpy(fwd_pmt, h2pack->fwd_pmt_idx, pmt_idx_bytes); memcpy(bwd_pmt, h2pack->bwd_pmt_idx, pmt_idx_bytes); size_t total_msize = sizeof(DTYPE) * nrow * (mat_size + 1); total_msize += 2 * (pmt_idx_bytes + pmt_vec_bytes); double et = get_wtime_sec(); precond->mat_size = mat_size; precond->rank = nrow; precond->shift = shift; precond->Ut = Ut_; precond->pmt_b = pmt_b; precond->pmt_x = pmt_x; precond->fwd_pmt = fwd_pmt; precond->bwd_pmt = bwd_pmt; precond->workbuf = workbuf; precond->t_build = et - st; precond->t_apply = 0.0; precond->n_apply = 0; precond->mem_MB = (double) total_msize / 1048576.0; *precond_ = precond; } // Apply LRD preconditioner, x := M_{LRD}^{-1} * b void LRD_precond_apply(LRD_precond_p precond, const DTYPE *b, DTYPE *x) { if (precond == NULL) return; int mat_size = precond->mat_size; int rank = precond->rank; int *fwd_pmt = precond->fwd_pmt; int *bwd_pmt = precond->bwd_pmt; DTYPE shift = precond->shift; DTYPE *Ut = precond->Ut; DTYPE *pmt_b = precond->pmt_b; DTYPE *pmt_x = precond->pmt_x; DTYPE *workbuf = precond->workbuf; double st = get_wtime_sec(); gather_vector_elements(sizeof(DTYPE), mat_size, fwd_pmt, b, pmt_b); // x = 1 / shift * (b - Ut' * (Ut * b)); memcpy(pmt_x, pmt_b, sizeof(DTYPE) * mat_size); CBLAS_GEMV(CblasRowMajor, CblasNoTrans, rank, mat_size, 1.0, Ut, mat_size, pmt_b, 1, 0.0, workbuf, 1); CBLAS_GEMV(CblasRowMajor, CblasTrans, rank, mat_size, -1.0, Ut, mat_size, workbuf, 1, 1.0, pmt_x, 1); DTYPE inv_shift = 1.0 / shift; #pragma omp simd for (int i = 0; i < mat_size; i++) pmt_x[i] *= inv_shift; gather_vector_elements(sizeof(DTYPE), mat_size, bwd_pmt, pmt_x, x); double et = get_wtime_sec(); precond->t_apply += et - st; precond->n_apply++; } // Destroy a LRD_precond structure void LRD_precond_destroy(LRD_precond_p *precond_) { LRD_precond_p precond = *precond_; if (precond == NULL) return; free(precond->Ut); free(precond->pmt_b); 
free(precond->pmt_x); free(precond->fwd_pmt); free(precond->bwd_pmt); free(precond->workbuf); free(precond); *precond_ = NULL; } // Print statistic info of a FSAI_precond structure void LRD_precond_print_stat(LRD_precond_p precond) { if (precond == NULL) return; printf( "LRD precond used memory = %.2lf MB, build time = %.3lf sec, apply avg time = %.3lf sec\n", precond->mem_MB, precond->t_build, precond->t_apply / (double) precond->n_apply ); }
createlut.c
// this uses the coefficient cube optimiser from the paper:
//
// Wenzel Jakob and Johannes Hanika. A low-dimensional function space for
// efficient spectral upsampling. Computer Graphics Forum (Proceedings of
// Eurographics), 38(2), March 2019.

// run like
// make && ./createlut 512 lut.pfm XYZ && eu lut.pfm -w 1400 -h 1400
// creates spectra.lut (c0*1e5 y l s)/(x y) and abney.lut (x y)/(s l)
#include <math.h>
#include <string.h>
#include <strings.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// project-local headers (LU solver, colour matrices, spectral-locus helpers)
#include "details/lu.h"
#include "details/matrices.h"
#include "clip.h"
#include "inpaint.h"
#include "q2t.h"
#include "../o-pfm/half.h"
#include "../../../core/core.h"

// when set (via -b), use a coarse midpoint-rule CMF discretisation instead of
// the fine Simpson-rule one below
int use_bad_cmf = 0;
// okay let's also hack the cie functions to our taste (or the gpu approximations we'll do)
#define BAD_SAMPLES 30
#define BAD_FINE_SAMPLES 30
#define BAD_LAMBDA_MIN 400.0
#define BAD_LAMBDA_MAX 700.0

/// Discretization of quadrature scheme
#define CIE_SAMPLES 95
#define CIE_LAMBDA_MIN 360.0
#define CIE_LAMBDA_MAX 830.0
#define CIE_FINE_SAMPLES ((CIE_SAMPLES - 1) * 3 + 1)
#define RGB2SPEC_EPSILON 1e-4
#define MOM_EPS 1e-3

#include "details/cie1931.h"

/// Precomputed tables for fast spectral -> RGB conversion
double lambda_tbl[CIE_FINE_SAMPLES],
  rgb_tbl[3][CIE_FINE_SAMPLES],
  rgb_to_xyz[3][3],
  xyz_to_rgb[3][3],
  xyz_whitepoint[3];

/// Currently supported gamuts
typedef enum Gamut
{
  SRGB, ProPhotoRGB, ACES2065_1, ACES_AP1, REC2020, ERGB, XYZ,
} Gamut;

// smooth step in (0,1): algebraic sigmoid used to bound the spectrum to [0,1]
double sigmoid(double x) { return 0.5 * x / sqrt(1.0 + x * x) + 0.5; }
double sqrd(double x) { return x * x; }

// (c0, y, dominant lambda) parametrisation -> polynomial coeffs (c0,c1,c2).
// inverse of cvt_c012_c0yl below (in normalised-lambda space).
void cvt_c0yl_c012(const double *c0yl, double *coeffs)
{
  coeffs[0] = c0yl[0];
  coeffs[1] = c0yl[2] * -2.0 * c0yl[0];
  coeffs[2] = c0yl[1] + c0yl[0] * c0yl[2] * c0yl[2];
}

// polynomial coeffs (fit in normalised lambda in [0,1]) -> (c0, y, dominant
// lambda) in *nanometre* space; degenerate (near-linear) fits map to all-zero.
void cvt_c012_c0yl(const double *coeffs, double *c0yl)
{
  // account for normalising lambda:
  double c0 = CIE_LAMBDA_MIN, c1 = 1.0 / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN);
  double A = coeffs[0], B = coeffs[1], C = coeffs[2];
  double A2 = (double)(A*(sqrd(c1)));
  double B2 = (double)(B*c1 - 2.0*A*c0*(sqrd(c1)));
  double C2 = (double)(C - B*c0*c1 + A*(sqrd(c0*c1)));
  if(fabs(A2) < 1e-12)
  {
    c0yl[0] = c0yl[1] = c0yl[2] = 0.0;
    return;
  }
  // convert to c0 y dom-lambda:
  c0yl[0] = A2; // square slope stays
  c0yl[2] = B2 / (-2.0*A2); // dominant wavelength
  c0yl[1] = C2 - B2*B2 / (4.0 * A2); // y
}

// rescale normalised-lambda coefficients to nanometre space and store as
// float for the output lut (shares the change-of-variable math with
// cvt_c012_c0yl above).
void quantise_coeffs(double coeffs[3], float out[3])
{
  // account for normalising lambda:
  double c0 = CIE_LAMBDA_MIN, c1 = 1.0 / (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN);
  double A = coeffs[0], B = coeffs[1], C = coeffs[2];
  const double A2 = (A*(sqrd(c1)));
  const double B2 = (B*c1 - 2*A*c0*(sqrd(c1)));
  const double C2 = (C - B*c0*c1 + A*(sqrd(c0*c1)));
  out[0] = (float)A2;
  out[1] = (float)B2;
  out[2] = (float)C2;
#if 0 // DEBUG vis
  if(fabs(A2) < 1e-12)
  {
    out[0] = out[1] = out[2] = 0.0;
    return;
  }
  // convert to c0 y dom-lambda:
  out[0] = A2; // square slope stays
  out[1] = C2 - B2*B2 / (4.0 * A2); // y
  out[2] = B2 / (-2.0*A2); // dominant wavelength
  out[2] = (out[2] - c0)*c1; // normalise to [0,1] range for vis
#endif
}

// starting guess for the optimiser: flat-ish ramp (c2=0, linear term 1)
void init_coeffs(double coeffs[3])
{
  coeffs[0] = 0.0;
  coeffs[1] = 1.0;
  coeffs[2] = 0.0;
}

// rescale the coefficient vector so its max magnitude stays <= 1000,
// keeping the Gauss-Newton iteration from blowing up
void clamp_coeffs(double coeffs[3])
{
  double max = fmax(fmax(fabs(coeffs[0]), fabs(coeffs[1])), fabs(coeffs[2]));
  if (max > 1000)
  {
    for (int j = 0; j < 3; ++j)
      coeffs[j] *= 1000 / max;
  }
#if 0
  // clamp dom lambda to visible range:
  // this will cause the fitter to diverge on the ridge.
  double c0yl[3];
  c0yl[0] = coeffs[0];
  if(fabs(coeffs[0]) < 1e-12) return;
  c0yl[2] = coeffs[1] / (-2.0*coeffs[0]);
  c0yl[1] = coeffs[2] - coeffs[1]*coeffs[1] / (4.0 * coeffs[0]);
  c0yl[2] = CLAMP(c0yl[2], 0.0, 1.0);
  coeffs[0] = c0yl[0];
  coeffs[1] = c0yl[2] * -2.0 * c0yl[0];
  coeffs[2] = c0yl[1] + c0yl[0] * c0yl[2] * c0yl[2];
#endif
}

// nonzero if rgb, converted to xy chromaticity, lies outside the spectral
// locus (delegates to spectrum_outside -- presumably from clip.h, confirm)
int check_gamut(double rgb[3])
{
  double xyz[3] = {0.0};
  for(int j=0;j<3;j++)
    for(int i=0;i<3;i++)
      xyz[i] += rgb_to_xyz[i][j] * rgb[j];
  double x = xyz[0] / (xyz[0] + xyz[1] + xyz[2]);
  double y = xyz[1] / (xyz[0] + xyz[1] + xyz[2]);
  return spectrum_outside(x, y);
}

/**
 * This function precomputes tables used to convert arbitrary spectra
 * to RGB (either sRGB or ProPhoto RGB)
 *
 * A composite quadrature rule integrates the CIE curves, reflectance, and
 * illuminant spectrum over each 5nm segment in the 360..830nm range using
 * Simpson's 3/8 rule (4th-order accurate), which evaluates the integrand at
 * four positions per segment. While the CIE curves and illuminant spectrum are
 * linear over the segment, the reflectance could have arbitrary behavior,
 * hence the extra precautions.
 */
void init_tables(Gamut gamut)
{
  memset(rgb_tbl, 0, sizeof(rgb_tbl));
  memset(xyz_whitepoint, 0, sizeof(xyz_whitepoint));
  // pick the illuminant and the XYZ<->RGB matrices for the requested gamut
  const double *illuminant = 0;
  switch (gamut)
  {
    case SRGB:
      illuminant = cie_d65;
      memcpy(xyz_to_rgb, xyz_to_srgb, sizeof(double) * 9);
      memcpy(rgb_to_xyz, srgb_to_xyz, sizeof(double) * 9);
      break;
    case ERGB:
      illuminant = cie_e;
      memcpy(xyz_to_rgb, xyz_to_ergb, sizeof(double) * 9);
      memcpy(rgb_to_xyz, ergb_to_xyz, sizeof(double) * 9);
      break;
    case XYZ:
      illuminant = cie_e;
      memcpy(xyz_to_rgb, xyz_to_xyz, sizeof(double) * 9);
      memcpy(rgb_to_xyz, xyz_to_xyz, sizeof(double) * 9);
      break;
    case ProPhotoRGB:
      illuminant = cie_d50;
      memcpy(xyz_to_rgb, xyz_to_prophoto_rgb, sizeof(double) * 9);
      memcpy(rgb_to_xyz, prophoto_rgb_to_xyz, sizeof(double) * 9);
      break;
    case ACES2065_1:
      illuminant = cie_d60;
      memcpy(xyz_to_rgb, xyz_to_aces2065_1, sizeof(double) * 9);
      memcpy(rgb_to_xyz, aces2065_1_to_xyz, sizeof(double) * 9);
      break;
    case ACES_AP1:
      illuminant = cie_d60;
      memcpy(xyz_to_rgb, xyz_to_aces_ap1, sizeof(double) * 9);
      memcpy(rgb_to_xyz, aces_ap1_to_xyz, sizeof(double) * 9);
      break;
    case REC2020:
      illuminant = cie_d65;
      memcpy(xyz_to_rgb, xyz_to_rec2020, sizeof(double) * 9);
      memcpy(rgb_to_xyz, rec2020_to_xyz, sizeof(double) * 9);
      break;
  }
  for (int i = 0; i < CIE_FINE_SAMPLES; ++i)
  {
    double h, lambda, weight;
    if(!use_bad_cmf)
    {
      // composite Simpson 3/8 rule: endpoint weight h*3/8, interior weights
      // alternate 3,3,2 (times h*3/8) across each 3-subinterval segment
      h = (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (CIE_FINE_SAMPLES - 1.0);
      lambda = CIE_LAMBDA_MIN + i * h;
      weight = 3.0 / 8.0 * h;
      if (i == 0 || i == CIE_FINE_SAMPLES - 1)
        ;
      else if ((i - 1) % 3 == 2)
        weight *= 2.f;
      else
        weight *= 3.f;
    }
    else
    {
      // "bad" cmf: plain midpoint rule, matching the gpu approximation
      h = (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
      lambda = CIE_LAMBDA_MIN + (i+0.5) * h;
      weight = h;
    }
    double xyz[3] = {
      cie_interp(cie_x, lambda),
      cie_interp(cie_y, lambda),
      cie_interp(cie_z, lambda) };
    const double I = cie_interp(illuminant, lambda);
#if 0 // output table for shader code
    double out[3] = {0.0};
    for (int k = 0; k < 3; ++k)
      for (int j = 0; j < 3; ++j)
        out[k] += xyz_to_rgb[k][j] * xyz[j];
    fprintf(stderr, "vec3(%g, %g, %g), // %g nm\n", out[0], out[1], out[2], lambda);
#endif
    lambda_tbl[i] = lambda;
    // fold illuminant and quadrature weight into the per-sample RGB responses
    for (int k = 0; k < 3; ++k)
      for (int j = 0; j < 3; ++j)
        rgb_tbl[k][i] += xyz_to_rgb[k][j] * xyz[j] * I * weight;
    for (int k = 0; k < 3; ++k)
      xyz_whitepoint[k] += xyz[k] * I * weight;
  }
}

// residual = rgb - integral(sigmoid(poly(coeff, lambda)) * rgb_tbl);
// coeff is in normalised-lambda space, see note below.
void eval_residual(const double *coeff, const double *rgb, double *residual)
{
  double out[3] = { 0.0, 0.0, 0.0 };
  for (int i = 0; i < CIE_FINE_SAMPLES; ++i)
  {
    // the optimiser doesn't like nanometers.
    // we'll do the normalised lambda thing and later convert when we write out.
    // NOTE(review): the fine table spacing uses CIE_FINE_SAMPLES-1 intervals
    // (init_tables), while this normalisation divides by CIE_FINE_SAMPLES --
    // off by one sample width at the red end; confirm this is intentional.
    double lambda;
    if(use_bad_cmf) lambda = (i+.5)/(double)CIE_FINE_SAMPLES;
    else lambda = i/(double)CIE_FINE_SAMPLES;
    double cf[3] = {coeff[0], coeff[1], coeff[2]};
    /* Polynomial */
    // Horner evaluation; the inner `i` deliberately shadows the sample index
    // (it only indexes cf[] inside this loop)
    double x = 0.0;
    for (int i = 0; i < 3; ++i)
      x = x * lambda + cf[i];
    /* Sigmoid */
    double s = sigmoid(x);
    /* Integrate against precomputed curves */
    for (int j = 0; j < 3; ++j)
      out[j] += rgb_tbl[j][i] * s;
  }
  memcpy(residual, rgb, sizeof(double) * 3);
  for (int j = 0; j < 3; ++j)
    residual[j] -= out[j];
}

// central-difference Jacobian of eval_residual w.r.t. the three coefficients;
// jac is row-major: jac[residual component][coefficient]
void eval_jacobian(const double *coeffs, const double *rgb, double **jac)
{
  double r0[3], r1[3], tmp[3];
  for (int i = 0; i < 3; ++i)
  {
    memcpy(tmp, coeffs, sizeof(double) * 3);
    tmp[i] -= RGB2SPEC_EPSILON;
    eval_residual(tmp, rgb, r0);
    memcpy(tmp, coeffs, sizeof(double) * 3);
    tmp[i] += RGB2SPEC_EPSILON;
    eval_residual(tmp, rgb, r1);
    // NaN guards (x == x is false only for NaN)
    for(int j=0;j<3;j++) assert(r1[j] == r1[j]);
    for(int j=0;j<3;j++) assert(r0[j] == r0[j]);
    for (int j = 0; j < 3; ++j)
      jac[j][i] = (r1[j] - r0[j]) * 1.0 / (2 * RGB2SPEC_EPSILON);
  }
}

// damped-free Gauss-Newton fit of the sigmoid-polynomial coefficients to a
// target rgb triple; returns the final residual norm (666.0 on LU failure).
double gauss_newton(const double rgb[3], double coeffs[3])
{
  int it = 40;//15;
  double r = 0;
  for (int i = 0; i < it; ++i)
  {
    double J0[3], J1[3], J2[3], *J[3] = { J0, J1, J2 };
    double residual[3];
    clamp_coeffs(coeffs);
    eval_residual(coeffs, rgb, residual);
    eval_jacobian(coeffs, rgb, J);
    int P[4];
    int rv = LUPDecompose(J, 3, 1e-15, P);
    if (rv != 1)
    {
      // singular Jacobian: dump state and bail with a sentinel residual
      fprintf(stdout, "RGB %g %g %g -> %g %g %g\n", rgb[0], rgb[1], rgb[2],
coeffs[0], coeffs[1], coeffs[2]);
      fprintf(stdout, "J0 %g %g %g\n", J0[0], J0[1], J0[2]);
      fprintf(stdout, "J1 %g %g %g\n", J1[0], J1[1], J1[2]);
      fprintf(stdout, "J2 %g %g %g\n", J2[0], J2[1], J2[2]);
      return 666.0;
    }
    double x[3];
    LUPSolve(J, P, residual, 3, x);
    // take the full Newton step and accumulate the squared residual
    r = 0.0;
    for (int j = 0; j < 3; ++j)
    {
      coeffs[j] -= x[j];
      r += residual[j] * residual[j];
    }
    if (r < 1e-6) break;
  }
  return sqrt(r);
}

// map a command-line gamut name to the enum; unknown names fall back to sRGB
static Gamut parse_gamut(const char *str)
{
  if(!strcasecmp(str, "sRGB")) return SRGB;
  if(!strcasecmp(str, "eRGB")) return ERGB;
  if(!strcasecmp(str, "XYZ")) return XYZ;
  if(!strcasecmp(str, "ProPhotoRGB")) return ProPhotoRGB;
  if(!strcasecmp(str, "ACES2065_1")) return ACES2065_1;
  if(!strcasecmp(str, "ACES_AP1")) return ACES_AP1;
  if(!strcasecmp(str, "REC2020")) return REC2020;
  return SRGB;
}

// usage: createlut <resolution> <output> [<gamut>] [-b]
// fits sigmoid-polynomial spectra over a chromaticity triangle and writes
// abney.lut (and, under MKSPECTRA, spectra.lut); requires macadam.lut.
int main(int argc, char **argv)
{
  if (argc < 3)
  {
    printf("syntax: createlut <resolution> <output> [<gamut>] [-b]\n"
        "where <gamut> is one of sRGB,eRGB,XYZ,ProPhotoRGB,ACES2065_1,ACES_AP1,REC2020\n");
    exit(-1);
  }
  for(int k=1;k<argc;k++) if(!strcmp(argv[k], "-b")) use_bad_cmf = 1;
  Gamut gamut = XYZ;
  if(argc > 3) gamut = parse_gamut(argv[3]);
  init_tables(gamut);
  const int res = atoi(argv[1]); // resolution of 2d lut
  // on-disk lut header shared by macadam.lut / abney.lut / spectra.lut
  typedef struct header_t
  {
    uint32_t magic;
    uint16_t version;
    uint8_t channels;
    uint8_t datatype;
    uint32_t wd;
    uint32_t ht;
  } header_t;
  // read max macadam brightness lut
  int max_w, max_h;
  float *max_b = 0;
  {
    FILE *f = fopen("macadam.lut", "rb");
    header_t header;
    if(!f) goto mac_error;
    if(fread(&header, sizeof(header_t), 1, f) != 1) goto mac_error;
    max_w = header.wd;
    max_h = header.ht;
    if(header.channels != 1) goto mac_error;
    if(header.version != 2) goto mac_error;
    max_b = calloc(sizeof(float), max_w*max_h);
    // NOTE(review): allocates sizeof(float) per uint16_t element (2x too
    // much, harmless) and the fread below swaps the usual (size, nmemb)
    // argument order and is unchecked -- same byte count either way, but a
    // short read would go unnoticed.
    uint16_t *half = calloc(sizeof(float), max_w*max_h);
    fread(half, header.wd*header.ht, sizeof(uint16_t), f);
    fclose(f);
    for(int k=0;k<header.wd*header.ht;k++)
      max_b[k] = half_to_float(half[k]);
    free(half);
    if(0)
    {
mac_error:
      if(f) fclose(f);
      fprintf(stderr, "could not read macadam.lut!\n");
      exit(2);
    }
  }
  printf("optimising ");
  // lsbuf: (lambda, saturation)-binned scatter target, 5 floats per texel:
  // x, y, z chromaticity + the (lamc, satc) coords that won the bin
  int lsres = res/4;
  float *lsbuf = calloc(sizeof(float), 5*lsres*lsres);
  size_t bufsize = 5*res*res;
  float *out = calloc(sizeof(float), bufsize);
#if defined(_OPENMP)
  // NOTE(review): threads scatter concurrently into lsbuf below without
  // synchronisation; bins can collide across rows. Looks like an accepted
  // benign race for a best-effort seed buffer -- confirm.
#pragma omp parallel for schedule(dynamic) shared(stdout,out,max_b,max_w,max_h)
#endif
  for (int j = 0; j < res; ++j)
  {
    printf("."); fflush(stdout);
    for (int i = 0; i < res; ++i)
    {
      // map the unit square to the chromaticity triangle
      double x = (i) / (double)res;
      double y = (j) / (double)res;
      quad2tri(&x, &y);
      double rgb[3];
      double coeffs[3];
      init_coeffs(coeffs);
      // normalise to max(rgb)=1
      rgb[0] = x;
      rgb[1] = y;
      rgb[2] = 1.0-x-y;
      if(check_gamut(rgb)) continue;
      // scale target brightness by half the macadam max-brightness lut value
      int ii = (int)fmin(max_w - 1, fmax(0, x * max_w + 0.5));
      int jj = (int)fmin(max_h - 1, fmax(0, y * max_h + 0.5));
      double m = fmax(0.001, 0.5*max_b[ii + max_w * jj]);
      double rgbm[3] = {rgb[0] * m, rgb[1] * m, rgb[2] * m};
      double resid = gauss_newton(rgbm, coeffs);
      double c0yl[3];
      cvt_c012_c0yl(coeffs, c0yl);
      (void)resid;
      int idx = j*res + i;
      out[5*idx + 0] = coeffs[0];
      out[5*idx + 1] = coeffs[1];
      out[5*idx + 2] = coeffs[2];
      float xy[2] = {x, y}, white[2] = {1.0f/3.0f, 1.0f/3.0f}; // illum E
      //{.3127266, .32902313}; // D65
      float sat = spectrum_saturation(xy, white);
      // bin into lambda/saturation buffer
      float satc = lsres * sat;
      // normalise to extended range:
      float norm = (c0yl[2] - 400.0)/(700.0-400.0);
      // float lamc = 1.0/(1.0+exp(-2.0*(2.0*norm-1.0))) * lsres / 2; // center deriv=1
      // float fx = norm*norm*norm+norm;
      float fx = norm-0.5;
      // fx = fx*fx*fx+fx; // worse
      // algebraic squash of the wavelength into the lower half of the bins
      float lamc = (0.5 + 0.5 * fx / sqrt(fx*fx+0.25)) * lsres / 2;
      int lami = fmaxf(0, fminf(lsres/2-1, lamc));
      int sati = satc;
      // upper half of the lambda bins is reserved for c0 > 0 (inverted lobes)
      if(c0yl[0] > 0) lami += lsres/2;
      lami = fmaxf(0, fminf(lsres-1, lami));
      sati = fmaxf(0, fminf(lsres-1, sati));
      // keep the sample closest to its bin centre
      float olamc = lsbuf[5*(lami*lsres + sati)+3];
      float osatc = lsbuf[5*(lami*lsres + sati)+4];
      float odist =
        (olamc - lami - 0.5f)*(olamc - lami - 0.5f)+
        (osatc - sati - 0.5f)*(osatc - sati - 0.5f);
      float dist =
        ( lamc - lami - 0.5f)*( lamc - lami - 0.5f)+
        ( satc - sati - 0.5f)*( satc - sati - 0.5f);
      if(dist < odist)
      {
        lsbuf[5*(lami*lsres + sati)+0] = x;
        lsbuf[5*(lami*lsres + sati)+1] = y;
        lsbuf[5*(lami*lsres + sati)+2] = 1.0-x-y;
        lsbuf[5*(lami*lsres + sati)+3] = lamc;
        lsbuf[5*(lami*lsres + sati)+4] = satc;
      }
      out[5*idx + 3] = (lami+0.5f) / (float)lsres;
      out[5*idx + 4] = (sati+0.5f) / (float)lsres;
    }
  }
#ifndef MKSPECTRA // don't write spectra.lut
  { // scope write abney map on (lambda, saturation)
    buf_t inpaint_buf = {
      .dat = lsbuf,
      .wd = lsres,
      .ht = lsres,
      .cpp = 5,
    };
    // fill bins the scatter pass never touched
    inpaint(&inpaint_buf);
    // determine gamut boundaries for rec709 and rec2020:
    // walk each row and find first time it goes outside.
    // record this in special 1d tables
    float *bound_rec709 = calloc(sizeof(float), lsres);
    float *bound_rec2020 = calloc(sizeof(float), lsres);
    for(int j=0;j<lsres;j++)
    {
      int active = 3; // bit 0: rec709 still inside, bit 1: rec2020 still inside
      for(int i=0;i<lsres;i++)
      {
        int idx = j*lsres + i;
        double xyz[] = {lsbuf[5*idx], lsbuf[5*idx+1], 1.0-lsbuf[5*idx]-lsbuf[5*idx+1]};
        double rec709 [3] = {0.0};
        double rec2020[3] = {0.0};
        for (int k = 0; k < 3; ++k)
          for (int l = 0; l < 3; ++l)
            rec709[k] += xyz_to_srgb[k][l] * xyz[l];
        for (int k = 0; k < 3; ++k)
          for (int l = 0; l < 3; ++l)
            rec2020[k] += xyz_to_rec2020[k][l] * xyz[l];
        if((active & 1) && (rec709 [0] < 0 || rec709 [1] < 0 || rec709 [2] < 0))
        {
          bound_rec709[j] = (i-.5f)/(float)lsres;
          active &= ~1;
        }
        if((active & 2) && (rec2020[0] < 0 || rec2020[1] < 0 || rec2020[2] < 0))
        {
          bound_rec2020[j] = (i-.5f)/(float)lsres;
          active &= ~2;
        }
        if(!active) break;
      }
    }
    // write 2 channel half lut:
    // last column holds the two per-row gamut boundaries
    uint32_t size = 2*sizeof(uint16_t)*lsres*(lsres+1);
    uint16_t *b16 = malloc(size);
    // also write pfm for debugging purposes
    FILE *pfm = fopen(argv[2], "wb");
    if(pfm) fprintf(pfm, "PF\n%d %d\n-1.0\n", lsres+1, lsres);
    for(int j=0;j<lsres;j++)
    {
      for(int i=0;i<lsres;i++)
      {
        int ki = j*lsres + i, ko = j*(lsres+1) + i;
        b16[2*ko+0] = float_to_half(lsbuf[5*ki+0]);
        b16[2*ko+1] = float_to_half(lsbuf[5*ki+1]);
        float q[] = {lsbuf[5*ki], lsbuf[5*ki+1], 1.0f-lsbuf[5*ki]-lsbuf[5*ki+1]};
        if(pfm) fwrite(q, sizeof(float), 3, pfm);
      }
      b16[2*(j*(lsres+1)+lsres)+0] = float_to_half(bound_rec709 [j]);
      b16[2*(j*(lsres+1)+lsres)+1] = float_to_half(bound_rec2020[j]);
      float q[] = {bound_rec709[j], bound_rec2020[j], 0.0f};
      if(pfm) fwrite(q, sizeof(float), 3, pfm);
    }
    header_t head = (header_t) {
      .magic = 1234,
      .version = 2,
      .channels = 2,
      .datatype = 0,
      .wd = lsres+1,
      .ht = lsres,
    };
    FILE *f = fopen("abney.lut", "wb");
    if(f)
    {
      fwrite(&head, sizeof(head), 1, f);
      fwrite(b16, size, 1, f);
      fclose(f);
    }
    free(b16);
    free(bound_rec709);
    free(bound_rec2020);
    if(pfm) fclose(pfm);
  }
#endif
#ifdef MKSPECTRA // write four channel lut only for abridged cmf
  { // write spectra map: (x,y) |--> sigmoid coeffs + saturation
    header_t head = (header_t) {
      .magic = 1234,
      .version = 2,
      .channels = 4,
      .datatype = 1, // 32-bit float
      .wd = res,
      .ht = res,
    };
    FILE *pfm = fopen(argv[2], "wb"); // also write pfm for debugging purposes
    if(pfm) fprintf(pfm, "PF\n%d %d\n-1.0\n", res, res);
    FILE *f = fopen("spectra.lut", "wb");
    if(f) fwrite(&head, sizeof(head), 1, f);
    for(int k=0;k<res*res;k++)
    {
      double coeffs[3] = {out[5*k+0], out[5*k+1], out[5*k+2]};
      float q[] = {0, 0, 0, out[5*k+4]};
      // c0yl works in half, but doesn't interpolate upon lookup :(
      quantise_coeffs(coeffs, q);
      if(f) fwrite(q, sizeof(float), 4, f);
      if(pfm) fwrite(q, sizeof(float), 3, pfm);
    }
    if(f) fclose(f);
    if(pfm) fclose(pfm);
  }
#endif
  free(out);
  printf("\n");
}
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. 
% % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); 
return(status); }
  /* continuation of ApplyPSDOpacityMask: modulate each pixel's alpha by the
     intensity of the composited mask (or undo that when revert is set) */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}

/*
  Stash a layer's opacity-mask image in the image registry under a random
  key, and record that key as the "psd:opacity-mask" artifact on the layer
  image so the writer can find the mask again.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  /*
    NOTE(review): the key's logical length is 3 ("2+1") yet bytes 8 and 9 are
    written below; this appears to rely on AcquireStringInfo over-allocating
    the datum, and the random prefix is raw bytes used as a C string (may
    contain NUL). Looks like the length should match the indices (e.g. 8+1)
    -- confirm against MagickCore string/random sources.
  */
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* mask page offsets are stored relative to the layer; make them absolute */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}

static ssize_t DecodePSDPixels(const size_t
number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 
0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case 
MultichannelMode: return "Multichannel";
    case DuotoneMode: return "Duotone";
    case LabMode: return "L*A*B";
    default: return "unknown";
  }
}

/*
  Negate every channel except alpha.  PSD stores CMYK data inverted relative
  to ImageMagick's representation, so this is applied on read and write.
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    status;

  channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,channel_mask);
  return(status);
}

/*
  Walk the 8BIM image-resource blocks, extracting the resolution resource
  (0x03ed) and the "has merged image" flag (0x0421), and return the whole
  section as an "8bim" profile.  All pointer advances are bounds-checked
  against blocks+length because this data comes straight from the file.
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* Pascal-style resource name is padded to an even total size. */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        /* Skip fixed-point fraction + unit fields (short_sans is a
           discard target). */
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version-info resource: byte 4 zero means no valid merged
           composite is stored in the file. */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even length. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}

/*
  Map a 4-character PSD blend-mode key onto the closest ImageMagick
  composite operator; unknown keys fall back to OverCompositeOp.
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"norm",4) == 0)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"mul ",4) == 0)
    return(MultiplyCompositeOp);
  if (LocaleNCompare(mode,"diss",4) == 0)
    return(DissolveCompositeOp);
  if (LocaleNCompare(mode,"diff",4) == 0)
    return(DifferenceCompositeOp);
  if (LocaleNCompare(mode,"dark",4) == 0)
    return(DarkenCompositeOp);
  if (LocaleNCompare(mode,"lite",4) == 0)
    return(LightenCompositeOp);
  if (LocaleNCompare(mode,"hue ",4) == 0)
    return(HueCompositeOp);
  if (LocaleNCompare(mode,"sat ",4) == 0)
    return(SaturateCompositeOp);
  if (LocaleNCompare(mode,"colr",4) == 0)
    return(ColorizeCompositeOp);
  if (LocaleNCompare(mode,"lum ",4) == 0)
    return(LuminizeCompositeOp);
  if (LocaleNCompare(mode,"scrn",4) == 0)
    return(ScreenCompositeOp);
  if (LocaleNCompare(mode,"over",4) == 0)
    return(OverlayCompositeOp);
  if (LocaleNCompare(mode,"hLit",4) == 0)
    return(HardLightCompositeOp);
  if (LocaleNCompare(mode,"sLit",4) == 0)
    return(SoftLightCompositeOp);
  if
(LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  return(OverCompositeOp);
}

/*
  Read 'length' bytes into p.  On little-endian images (PSB byte-swapped
  blobs) the bytes are reversed in place with a XOR swap so 4-byte keys
  compare correctly.  Returns the byte count actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *q;

      q=p+length;
      for(--q; p < q; ++p, --q)
      {
        *p = *p ^ *q,
        *q = *p ^ *q,
        *p = *p ^ *q;
      }
    }
  return(count);
}

/*
  Store one decoded sample into the pixel q.  'type' is the PSD channel id:
  0..3 map to red/green/blue/black(or alpha), -1 is the alpha channel, and
  values < -1 are layer-mask channels (mapped onto RGB of the mask image).
  Palette images route through the colormap instead.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      Quantum
        index;

      index=pixel;
      if (packet_size == 1)
        index=(Quantum) ScaleQuantumToChar(index);
      index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
        exception);
      if (type == 0)
        SetPixelIndex(image,index,q);
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      /* Non-index channels on a palette image carry alpha. */
      if (type != 0)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* Channel 4 on an RGB-compatible image with >3 color channels is
         ignored; otherwise it is treated as alpha. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}

/*
  Decode one scanline of channel data ('pixels', already decompressed) into
  row 'row' of the image.  Handles 1/2/4-byte packets; 1-bit images expand
  each byte into up to 8 pixels here.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* 1-bit data: expand up to 8 pixels from this byte; set bits are
           black (0), clear bits are white (QuantumRange). */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char)
            pixel) & (0x01 << (7-bit))) != 0 ?
0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* The outer for() also increments x; compensate unless we are
           exactly at the row end. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}

/*
  Read one uncompressed channel: one blob read per scanline, then decode
  it into the image.  Returns MagickFalse on a short read or decode error.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "      layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  Read the per-scanline compressed byte counts that precede RLE channel
  data: 16-bit each for PSD, 32-bit for PSB.  Returns NULL on allocation
  failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if(sizes != (MagickOffsetType *) NULL)
    {
      for (y=0; y < (ssize_t) size; y++)
      {
        if (psd_info->version == 1)
          sizes[y]=(MagickOffsetType) ReadBlobShort(image);
        else
          sizes[y]=(MagickOffsetType) ReadBlobLong(image);
      }
    }
  return sizes;
}

/*
  Read one RLE-compressed channel: for each scanline, read sizes[y]
  compact bytes, expand them with DecodePSDPixels, then decode the row.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "      layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned
char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compact buffer to the largest scanline; reject absurd
     per-row sizes so a corrupt file cannot force a huge allocation. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ?
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(unsigned char *pixels,const size_t count) { register unsigned char *p; size_t remaining; p=pixels; remaining=count; while (--remaining) { *(p+1)+=*p; p++; } } static void Unpredict16Bit(const Image *image,unsigned char *pixels, const size_t count, const size_t row_size) { register unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; p+=2; } p+=2; remaining-=row_size; } } static void Unpredict32Bit(const Image *image,unsigned char *pixels, unsigned char *output_pixels,const size_t row_size) { register unsigned char *p, *q; register ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1=image->columns; offset2=2*offset1; offset3=3*offset1; p=pixels; q=output_pixels; for (y=0; y < (ssize_t) image->rows; y++) { start=p; remaining=row_size; while (--remaining) { *(p+1)+=*p; p++; } p=start; remaining=image->columns; while (remaining--) { *(q++)=*p; *(q++)=*(p+offset1); *(q++)=*(p+offset2); *(q++)=*(p+offset3); p++; } p=start+row_size; } } static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, packet_size, row_size; register ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if 
((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(pixels,count); else if (packet_size == 2) Unpredict16Bit(image,pixels,count,row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels=(unsigned char *) AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char 
*) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } Unpredict32Bit(image,pixels,output_pixels,row_size); pixels=(unsigned char *) RelinquishMagickMemory(pixels); pixels=output_pixels; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); 
ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      /* Replace any previously attached mask with the new one. */
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}

/*
  Decode a single layer: set its compose operator and hidden psd:layer.*
  artifacts, read every channel, apply the layer opacity, negate CMYK
  data, and composite/attach the opacity mask if one was decoded.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "    setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "    reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression tag. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}

/*
  Sanity-check a layer's channel list against the document color mode:
  every color channel required by min_channels must be present exactly
  once; -1 (alpha) and mask channels (< -1) are allowed extras.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* Build a bitmask of required channels, then clear bits as they are
     found; all bits cleared means the layer is complete. */
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    if (type == 0)
      channel_type&=~RedChannel;
    else
      if (type == 1)
        channel_type&=~GreenChannel;
      else
        if (type == 2)
          channel_type&=~BlueChannel;
        else
          if (type == 3)
            channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  Compact the layer array (dropping entries whose image is NULL), link the
  surviving layer images into image's next/previous chain, and release the
  LayerInfo array.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    j;

  for (i=0; i < number_layers; i++)
  {
    if (layer_info[i].image == (Image *) NULL)
      {
        for (j=i; j < number_layers - 1; j++)
          layer_info[j] = layer_info[j+1];
        number_layers--;
        i--;
      }
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}

/*
  Decide whether layer 'index' falls outside the scene range requested in
  image_info; only applies when the file also has a merged composite.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if (psd_info->has_merged_image == MagickFalse)
    return(MagickFalse);
  if (image_info->number_scenes == 0)
    return(MagickFalse);
  if (index < image_info->scene)
    return(MagickTrue);
  if (index > image_info->scene+image_info->number_scenes-1)
    return(MagickTrue);
  return(MagickFalse);
}

static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel. So we enable it when we think we should.
  */
  if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) ||
      ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) ||
      ((psd_info->mode == CMYKMode) && (psd_info->channels > 4)))
    image->alpha_trait=BlendPixelTrait;
}

/*
  Scan the layer's "additional info" blocks for the 'luni' (Unicode layer
  name) key and, when the name is plain ASCII and fits, copy it into
  layer_info->name.  All lengths are validated against remaining_length.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        if (length * 2 > size - 4)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported: UTF-16BE code units with a
             non-zero high byte abort the copy. */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}

/*
  Determine the size of the layer-info section.  When the plain size field
  is zero, look for additional-info keys: Mt16/Mt32/Mtrn (merged-image
  transparency, which also implies alpha) and Lr16/Lr32 (16/32-bit layer
  data), whose payload supplies the real size.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}

/*
  Read the complete layer-info section: the layer records (geometry,
  channels, blend key, flags, mask info, blending ranges, name, additional
  info), then each layer's channel data, and finally attach the decoded
  layer images to 'image'.  A negative layer count signals that the first
  alpha channel of the merged result holds its transparency.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: parse every layer record.
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "  reading layer #%.20g",(double) i+1);
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "    offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "    channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "  layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 set means the layer is hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "   blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image);  /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "    layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Unless the mask position is locked to the canvas (bit 0x01),
               make its offset relative to the layer. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "      layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "      layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "      layer name: %s",layer_info[i].name);
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /*
              Skip over the padding of the layer name
            */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever remains of this record is additional info (8BIM keys),
           kept for ParseAdditionalInfo and the psd:additional-info
           profile. */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "      layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "  allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: read the channel data (or skip it for out-of-range scenes).
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info,
         image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "  reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}

/*
  Public entry: read the layers of a PSD image, subject to the coder
  security policy.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
rights=ReadPolicyRights;
if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
  return(MagickTrue);
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
  exception));
}

/*
  ReadPSDMergedImage() reads the flattened composite image data that follows
  the layer and mask section of a PSD file.  Only Raw and RLE compression are
  handled here; any other compression type emits a warning and returns
  MagickFalse so the caller can fall back to composing the layers itself.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /*
    If specific scenes other than the composite were requested, skip the
    merged image entirely.
  */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /*
        RLE data is preceded by one packed-row byte count per row and per
        channel.
      */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  /*
    Channel data is stored planar: all rows of channel 0, then channel 1, ...
  */
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /*
      A two-channel image is gray plus alpha; map the second channel to the
      alpha channel type (-1).
    */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /*
    PSD stores CMYK samples inverted; undo that before fixing the alpha
    blend.
  */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}

static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;
MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g 
with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels=1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. 
*/ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. 
*/ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); 
return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. 
% % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) 
SeekBlob(image,offset,SEEK_SET);
result=SetPSDSize(psd_info,image,size);
(void) SeekBlob(image,current_offset,SEEK_SET);
return(result);
}

/*
  PSDPackbitsEncodeImage() compresses `length' bytes from `pixels' with the
  PackBits run-length scheme into `compact_pixels' and returns the number of
  compressed bytes emitted (including the trailing EOD marker).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for assembling a literal run (max 127 bytes + header). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One byte remaining: emit it as a literal run of one. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes remaining: emit both as a literal run. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical bytes: emit one packed run. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise a literal run of three. */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run: gather bytes until a run of three identical bytes
          begins, then emit a length byte followed by the literals.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  WriteCompressionStart() writes the two-byte compression marker and, for
  RLE, reserves one placeholder byte-count slot per row per channel that is
  back-patched later via WritePSDOffset().  Returns the bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (compression == RLECompression)
    {
      length=(size_t) WriteBlobShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (compression == ZipCompression)
    length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}

/*
  WritePSDChannel() writes a single channel of `next_image' using the given
  quantum type and compression, returning the number of bytes written.
  When `separate' is set, the channel carries its own compression marker.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type,unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Each separate channel records its own compression marker. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef 
MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  AcquireCompactPixels() allocates a scratch buffer for one PackBits
  compressed row; the (9*columns)+1 factor is a generous worst-case bound
  for incompressible data.  Returns NULL (with an exception recorded) on
  allocation failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /* Two bytes per sample when the depth exceeds 8 bits. */
  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(compact_pixels);
}

/*
  WritePSDChannels() writes every channel of `next_image' (index or
  gray/RGB/CMYK plus optional alpha and opacity mask), delegating each
  channel to WritePSDChannel().  Returns the total bytes written.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /*
        Interleaved mode: compute the channel count up front so a single
        compression header can cover all of them.
      */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Width of one channel's worth of reserved RLE byte-count slots. */
      offset_length=(next_image->rows*(psd_info->version == 1 ?
2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if 
(next_image->alpha_trait != UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value);
if (length == 0)
  count+=WriteBlobByte(image,0);
else
  {
    count+=WriteBlobByte(image,(unsigned char) length);
    count+=WriteBlob(image,length,(const unsigned char *) value);
  }
/* Account for the leading length byte when computing the pad. */
length++;
if ((length % padding) == 0)
  return(count);
for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
  count+=WriteBlobByte(image,0);
return(count);
}

/*
  WriteResolutionResourceBlock() emits the 8BIM resolution image resource
  (id 0x03ED): horizontal and vertical resolution as 16.16 fixed-point
  pixels-per-inch plus their display units.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to pixels/inch before fixed-point scaling. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}

/*
  WriteChannelSize() writes a channel record: the signed channel id
  followed by a placeholder size that is back-patched later.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

/*
  RemoveICCProfileFromResourceBlock() strips the ICC profile resource
  (id 0x040F) from an 8BIM resource block in place, shrinking the
  StringInfo accordingly.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk the resource records; each starts with an "8BIM" signature. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if
(LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", 
"lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo 
*info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 
4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
*/ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* Bytes per packet: 3 color samples, doubled for >8-bit depth. */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* PSB (version 2) is forced for dimensions beyond the PSD 30000 limit. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  /*
    File header section.
  */
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version);  /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) &&
        (image_info->type != TrueColorAlphaType) &&
        (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ?
            4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ?
            5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short)
        (image->storage_class == PseudoClass ? 8 :
         image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* empty color mode data section */
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);  /* 3 x 256 palette bytes */
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone: the resolution (and possibly ICC) resources are
         stripped because they are rewritten explicitly below. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);  /* ICC profile resource */
      (void) WriteBlobMSBShort(image,0);  /* empty resource name */
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);  /* pad resource to even length */
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Layer and mask information section; the size field written here
         as 0 is patched once the real size is known. */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0);  /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* Zip is not supported for the composite; fall back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;  /* restore caller's setting */
    }
  (void) CloseBlob(image);
  return(status);
}
test77.c
int main() { #pragma omp parallel { int i, j, k; for (i = 0; i < 100000; i++) for (j = 0; j < 100000; j++) { //#pragma omp barrier } } }
pzgssvx.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /*! @file * \brief Solves a system of linear equations A*X=B * * <pre> * -- Distributed SuperLU routine (version 6.0) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * November 1, 2007 * October 22, 2012 * October 1, 2014 * April 5, 2015 * December 31, 2015 version 4.3 * December 31, 2016 version 5.1.3 * April 10, 2018 version 5.3 * September 18, 2018 version 6.0 * </pre> */ #include <math.h> #include "superlu_zdefs.h" /*! \brief * * <pre> * Purpose * ======= * * PZGSSVX solves a system of linear equations A*X=B, * by using Gaussian elimination with "static pivoting" to * compute the LU factorization of A. * * Static pivoting is a technique that combines the numerical stability * of partial pivoting with the scalability of Cholesky (no pivoting), * to run accurately and efficiently on large numbers of processors. * See our paper at http://www.nersc.gov/~xiaoye/SuperLU/ for a detailed * description of the parallel algorithms. * * The input matrices A and B are distributed by block rows. * Here is a graphical illustration (0-based indexing): * * A B * 0 --------------- ------ * | | | | * | | P0 | | * | | | | * --------------- ------ * - fst_row->| | | | * | | | | | * m_loc | | P1 | | * | | | | | * - | | | | * --------------- ------ * | . | |. | * | . | |. | * | . | |. | * --------------- ------ * * where, fst_row is the row number of the first row, * m_loc is the number of rows local to this processor * These are defined in the 'SuperMatrix' structure, see supermatrix.h. * * * Here are the options for using this code: * * 1. 
Independent of all the other options specified below, the * user must supply * * - B, the matrix of right-hand sides, distributed by block rows, * and its dimensions ldb (local) and nrhs (global) * - grid, a structure describing the 2D processor mesh * - options->IterRefine, which determines whether or not to * improve the accuracy of the computed solution using * iterative refinement * * On output, B is overwritten with the solution X. * * 2. Depending on options->Fact, the user has four options * for solving A*X=B. The standard option is for factoring * A "from scratch". (The other options, described below, * are used when A is sufficiently similar to a previously * solved problem to save time by reusing part or all of * the previous factorization.) * * - options->Fact = DOFACT: A is factored "from scratch" * * In this case the user must also supply * * o A, the input matrix * * as well as the following options to determine what matrix to * factorize. * * o options->Equil, to specify how to scale the rows and columns * of A to "equilibrate" it (to try to reduce its * condition number and so improve the * accuracy of the computed solution) * * o options->RowPerm, to specify how to permute the rows of A * (typically to control numerical stability) * * o options->ColPerm, to specify how to permute the columns of A * (typically to control fill-in and enhance * parallelism during factorization) * * o options->ReplaceTinyPivot, to specify how to deal with tiny * pivots encountered during factorization * (to control numerical stability) * * The outputs returned include * * o ScalePermstruct, modified to describe how the input matrix A * was equilibrated and permuted: * . ScalePermstruct->DiagScale, indicates whether the rows and/or * columns of A were scaled * . ScalePermstruct->R, array of row scale factors * . ScalePermstruct->C, array of column scale factors * . ScalePermstruct->perm_r, row permutation vector * . 
ScalePermstruct->perm_c, column permutation vector * * (part of ScalePermstruct may also need to be supplied on input, * depending on options->RowPerm and options->ColPerm as described * later). * * o A, the input matrix A overwritten by the scaled and permuted * matrix diag(R)*A*diag(C)*Pc^T, where * Pc is the row permutation matrix determined by * ScalePermstruct->perm_c * diag(R) and diag(C) are diagonal scaling matrices determined * by ScalePermstruct->DiagScale, ScalePermstruct->R and * ScalePermstruct->C * * o LUstruct, which contains the L and U factorization of A1 where * * A1 = Pc*Pr*diag(R)*A*diag(C)*Pc^T = L*U * * (Note that A1 = Pc*Pr*Aout, where Aout is the matrix stored * in A on output.) * * 3. The second value of options->Fact assumes that a matrix with the same * sparsity pattern as A has already been factored: * * - options->Fact = SamePattern: A is factored, assuming that it has * the same nonzero pattern as a previously factored matrix. In * this case the algorithm saves time by reusing the previously * computed column permutation vector stored in * ScalePermstruct->perm_c and the "elimination tree" of A * stored in LUstruct->etree * * In this case the user must still specify the following options * as before: * * o options->Equil * o options->RowPerm * o options->ReplaceTinyPivot * * but not options->ColPerm, whose value is ignored. This is because the * previous column permutation from ScalePermstruct->perm_c is used as * input. The user must also supply * * o A, the input matrix * o ScalePermstruct->perm_c, the column permutation * o LUstruct->etree, the elimination tree * * The outputs returned include * * o A, the input matrix A overwritten by the scaled and permuted * matrix as described above * o ScalePermstruct, modified to describe how the input matrix A was * equilibrated and row permuted * o LUstruct, modified to contain the new L and U factors * * 4. 
The third value of options->Fact assumes that a matrix B with the same * sparsity pattern as A has already been factored, and where the * row permutation of B can be reused for A. This is useful when A and B * have similar numerical values, so that the same row permutation * will make both factorizations numerically stable. This lets us reuse * all of the previously computed structure of L and U. * * - options->Fact = SamePattern_SameRowPerm: A is factored, * assuming not only the same nonzero pattern as the previously * factored matrix B, but reusing B's row permutation. * * In this case the user must still specify the following options * as before: * * o options->Equil * o options->ReplaceTinyPivot * * but not options->RowPerm or options->ColPerm, whose values are * ignored. This is because the permutations from ScalePermstruct->perm_r * and ScalePermstruct->perm_c are used as input. * * The user must also supply * * o A, the input matrix * o ScalePermstruct->DiagScale, how the previous matrix was row * and/or column scaled * o ScalePermstruct->R, the row scalings of the previous matrix, * if any * o ScalePermstruct->C, the columns scalings of the previous matrix, * if any * o ScalePermstruct->perm_r, the row permutation of the previous * matrix * o ScalePermstruct->perm_c, the column permutation of the previous * matrix * o all of LUstruct, the previously computed information about * L and U (the actual numerical values of L and U * stored in LUstruct->Llu are ignored) * * The outputs returned include * * o A, the input matrix A overwritten by the scaled and permuted * matrix as described above * o ScalePermstruct, modified to describe how the input matrix A was * equilibrated (thus ScalePermstruct->DiagScale, * R and C may be modified) * o LUstruct, modified to contain the new L and U factors * * 5. 
The fourth and last value of options->Fact assumes that A is * identical to a matrix that has already been factored on a previous * call, and reuses its entire LU factorization * * - options->Fact = Factored: A is identical to a previously * factorized matrix, so the entire previous factorization * can be reused. * * In this case all the other options mentioned above are ignored * (options->Equil, options->RowPerm, options->ColPerm, * options->ReplaceTinyPivot) * * The user must also supply * * o A, the unfactored matrix, only in the case that iterative * refinement is to be done (specifically A must be the output * A from the previous call, so that it has been scaled and permuted) * o all of ScalePermstruct * o all of LUstruct, including the actual numerical values of * L and U * * all of which are unmodified on output. * * Arguments * ========= * * options (input) superlu_dist_options_t* (global) * The structure defines the input parameters to control * how the LU decomposition will be performed. * The following fields should be defined for this structure: * * o Fact (fact_t) * Specifies whether or not the factored form of the matrix * A is supplied on entry, and if not, how the matrix A should * be factorized based on the previous history. * * = DOFACT: The matrix A will be factorized from scratch. * Inputs: A * options->Equil, RowPerm, ColPerm, ReplaceTinyPivot * Outputs: modified A * (possibly row and/or column scaled and/or * permuted) * all of ScalePermstruct * all of LUstruct * * = SamePattern: the matrix A will be factorized assuming * that a factorization of a matrix with the same sparsity * pattern was performed prior to this one. 
Therefore, this * factorization will reuse column permutation vector * ScalePermstruct->perm_c and the elimination tree * LUstruct->etree * Inputs: A * options->Equil, RowPerm, ReplaceTinyPivot * ScalePermstruct->perm_c * LUstruct->etree * Outputs: modified A * (possibly row and/or column scaled and/or * permuted) * rest of ScalePermstruct (DiagScale, R, C, perm_r) * rest of LUstruct (GLU_persist, Llu) * * = SamePattern_SameRowPerm: the matrix A will be factorized * assuming that a factorization of a matrix with the same * sparsity pattern and similar numerical values was performed * prior to this one. Therefore, this factorization will reuse * both row and column scaling factors R and C, and the * both row and column permutation vectors perm_r and perm_c, * distributed data structure set up from the previous symbolic * factorization. * Inputs: A * options->Equil, ReplaceTinyPivot * all of ScalePermstruct * all of LUstruct * Outputs: modified A * (possibly row and/or column scaled and/or * permuted) * modified LUstruct->Llu * = FACTORED: the matrix A is already factored. * Inputs: all of ScalePermstruct * all of LUstruct * * o Equil (yes_no_t) * Specifies whether to equilibrate the system. * = NO: no equilibration. * = YES: scaling factors are computed to equilibrate the system: * diag(R)*A*diag(C)*inv(diag(C))*X = diag(R)*B. * Whether or not the system will be equilibrated depends * on the scaling of the matrix A, but if equilibration is * used, A is overwritten by diag(R)*A*diag(C) and B by * diag(R)*B. * * o RowPerm (rowperm_t) * Specifies how to permute rows of the matrix A. * = NATURAL: use the natural ordering. * = LargeDiag_MC64: use the Duff/Koster algorithm to permute rows * of the original matrix to make the diagonal large * relative to the off-diagonal. * = LargeDiag_APWM: use the parallel approximate-weight perfect * matching to permute rows of the original matrix * to make the diagonal large relative to the * off-diagonal. 
* = MY_PERMR: use the ordering given in ScalePermstruct->perm_r
 * input by the user.
 *
 * o ColPerm (colperm_t)
 * Specifies what type of column permutation to use to reduce fill.
 * = NATURAL: natural ordering.
 * = MMD_AT_PLUS_A: minimum degree ordering on structure of A'+A.
 * = MMD_ATA: minimum degree ordering on structure of A'*A.
 * = MY_PERMC: the ordering given in ScalePermstruct->perm_c.
 *
 * o ReplaceTinyPivot (yes_no_t)
 * = NO: do not modify pivots
 * = YES: replace tiny pivots by sqrt(epsilon)*norm(A) during
 * LU factorization.
 *
 * o IterRefine (IterRefine_t)
 * Specifies how to perform iterative refinement.
 * = NO: no iterative refinement.
 * = SLU_DOUBLE: accumulate residual in double precision.
 * = SLU_EXTRA: accumulate residual in extra precision.
 *
 * NOTE: all options must be identical on all processes when
 * calling this routine.
 *
 * A (input/output) SuperMatrix* (local)
 * On entry, matrix A in A*X=B, of dimension (A->nrow, A->ncol).
 * The number of linear equations is A->nrow. The type of A must be:
 * Stype = SLU_NR_loc; Dtype = SLU_Z; Mtype = SLU_GE.
 * That is, A is stored in distributed compressed row format.
 * See supermatrix.h for the definition of 'SuperMatrix'.
 * This routine only handles square A, however, the LU factorization
 * routine PZGSTRF can factorize rectangular matrices.
 * On exit, A may be overwritten by diag(R)*A*diag(C)*Pc^T,
 * depending on ScalePermstruct->DiagScale and options->ColPerm:
 * if ScalePermstruct->DiagScale != NOEQUIL, A is overwritten by
 * diag(R)*A*diag(C).
 * if options->ColPerm != NATURAL, A is further overwritten by
 * diag(R)*A*diag(C)*Pc^T.
 * If all the above conditions are true, the LU decomposition is
 * performed on the matrix Pc*Pr*diag(R)*A*diag(C)*Pc^T.
 *
 * ScalePermstruct (input/output) ScalePermstruct_t* (global)
 * The data structure to store the scaling and permutation vectors
 * describing the transformations performed to the matrix A.
* It contains the following fields: * * o DiagScale (DiagScale_t) * Specifies the form of equilibration that was done. * = NOEQUIL: no equilibration. * = ROW: row equilibration, i.e., A was premultiplied by * diag(R). * = COL: Column equilibration, i.e., A was postmultiplied * by diag(C). * = BOTH: both row and column equilibration, i.e., A was * replaced by diag(R)*A*diag(C). * If options->Fact = FACTORED or SamePattern_SameRowPerm, * DiagScale is an input argument; otherwise it is an output * argument. * * o perm_r (int*) * Row permutation vector, which defines the permutation matrix Pr; * perm_r[i] = j means row i of A is in position j in Pr*A. * If options->RowPerm = MY_PERMR, or * options->Fact = SamePattern_SameRowPerm, perm_r is an * input argument; otherwise it is an output argument. * * o perm_c (int*) * Column permutation vector, which defines the * permutation matrix Pc; perm_c[i] = j means column i of A is * in position j in A*Pc. * If options->ColPerm = MY_PERMC or options->Fact = SamePattern * or options->Fact = SamePattern_SameRowPerm, perm_c is an * input argument; otherwise, it is an output argument. * On exit, perm_c may be overwritten by the product of the input * perm_c and a permutation that postorders the elimination tree * of Pc*A'*A*Pc'; perm_c is not changed if the elimination tree * is already in postorder. * * o R (double*) dimension (A->nrow) * The row scale factors for A. * If DiagScale = ROW or BOTH, A is multiplied on the left by * diag(R). * If DiagScale = NOEQUIL or COL, R is not defined. * If options->Fact = FACTORED or SamePattern_SameRowPerm, R is * an input argument; otherwise, R is an output argument. * * o C (double*) dimension (A->ncol) * The column scale factors for A. * If DiagScale = COL or BOTH, A is multiplied on the right by * diag(C). * If DiagScale = NOEQUIL or ROW, C is not defined. * If options->Fact = FACTORED or SamePattern_SameRowPerm, C is * an input argument; otherwise, C is an output argument. 
* * B (input/output) doublecomplex* (local) * On entry, the right-hand side matrix of dimension (m_loc, nrhs), * where, m_loc is the number of rows stored locally on my * process and is defined in the data structure of matrix A. * On exit, the solution matrix if info = 0; * * ldb (input) int (local) * The leading dimension of matrix B. * * nrhs (input) int (global) * The number of right-hand sides. * If nrhs = 0, only LU decomposition is performed, the forward * and back substitutions are skipped. * * grid (input) gridinfo_t* (global) * The 2D process mesh. It contains the MPI communicator, the number * of process rows (NPROW), the number of process columns (NPCOL), * and my process rank. It is an input argument to all the * parallel routines. * Grid can be initialized by subroutine SUPERLU_GRIDINIT. * See superlu_zdefs.h for the definition of 'gridinfo_t'. * * LUstruct (input/output) LUstruct_t* * The data structures to store the distributed L and U factors. * It contains the following fields: * * o etree (int*) dimension (A->ncol) (global) * Elimination tree of Pc*(A'+A)*Pc' or Pc*A'*A*Pc'. * It is computed in sp_colorder() during the first factorization, * and is reused in the subsequent factorizations of the matrices * with the same nonzero pattern. * On exit of sp_colorder(), the columns of A are permuted so that * the etree is in a certain postorder. This postorder is reflected * in ScalePermstruct->perm_c. * NOTE: * Etree is a vector of parent pointers for a forest whose vertices * are the integers 0 to A->ncol-1; etree[root]==A->ncol. * * o Glu_persist (Glu_persist_t*) (global) * Global data structure (xsup, supno) replicated on all processes, * describing the supernode partition in the factored matrices * L and U: * xsup[s] is the leading column of the s-th supernode, * supno[i] is the supernode number to which column i belongs. * * o Llu (LocalLU_t*) (local) * The distributed data structures to store L and U factors. 
* See superlu_zdefs.h for the definition of 'LocalLU_t'. * * SOLVEstruct (input/output) SOLVEstruct_t* * The data structure to hold the communication pattern used * in the phases of triangular solution and iterative refinement. * This pattern should be initialized only once for repeated solutions. * If options->SolveInitialized = YES, it is an input argument. * If options->SolveInitialized = NO and nrhs != 0, it is an output * argument. See superlu_zdefs.h for the definition of 'SOLVEstruct_t'. * * berr (output) double*, dimension (nrhs) (global) * The componentwise relative backward error of each solution * vector X(j) (i.e., the smallest relative change in * any element of A or B that makes X(j) an exact solution). * * stat (output) SuperLUStat_t* * Record the statistics on runtime and floating-point operation count. * See util.h for the definition of 'SuperLUStat_t'. * * info (output) int* * = 0: successful exit * > 0: if info = i, and i is * <= A->ncol: U(i,i) is exactly zero. The factorization has * been completed, but the factor U is exactly singular, * so the solution could not be computed. * > A->ncol: number of bytes allocated when memory allocation * failure occurred, plus A->ncol. * * See superlu_zdefs.h for the definitions of various data types. * </pre> */ void pzgssvx(superlu_dist_options_t *options, SuperMatrix *A, ScalePermstruct_t *ScalePermstruct, doublecomplex B[], int ldb, int nrhs, gridinfo_t *grid, LUstruct_t *LUstruct, SOLVEstruct_t *SOLVEstruct, double *berr, SuperLUStat_t *stat, int *info) { NRformat_loc *Astore; SuperMatrix GA; /* Global A in NC format */ NCformat *GAstore; doublecomplex *a_GA; SuperMatrix GAC; /* Global A in NCP format (add n end pointers) */ NCPformat *GACstore; Glu_persist_t *Glu_persist = LUstruct->Glu_persist; Glu_freeable_t *Glu_freeable; /* The nonzero structures of L and U factors, which are replicated on all processrs. (lsub, xlsub) contains the compressed subscript of supernodes in L. 
(usub, xusub) contains the compressed subscript of nonzero segments in U. If options->Fact != SamePattern_SameRowPerm, they are computed by SYMBFACT routine, and then used by PDDISTRIBUTE routine. They will be freed after PDDISTRIBUTE routine. If options->Fact == SamePattern_SameRowPerm, these structures are not used. */ fact_t Fact; doublecomplex *a; int_t *colptr, *rowind; int_t *perm_r; /* row permutations from partial pivoting */ int_t *perm_c; /* column permutation vector */ int_t *etree; /* elimination tree */ int_t *rowptr, *colind; /* Local A in NR*/ int_t colequ, Equil, factored, job, notran, rowequ, need_value; int_t i, iinfo, j, irow, m, n, nnz, permc_spec; int_t nnz_loc, m_loc, fst_row, icol; int iam,iam_g; int ldx; /* LDA for matrix X (local). */ char equed[1], norm[1]; double *C, *R, *C1, *R1, amax, anorm, colcnd, rowcnd; doublecomplex *X, *b_col, *b_work, *x_col; double t; float GA_mem_use = 0.0; /* memory usage by global A */ float dist_mem_use = 0.0; /* memory usage during distribution */ superlu_dist_mem_usage_t num_mem_usage, symb_mem_usage; int64_t nnzLU; int_t nnz_tot; doublecomplex *nzval_a; doublecomplex asum,asum_tot,lsum,lsum_tot; int_t nsupers,nsupers_j; int_t lk,k,knsupc,nsupr; int_t *lsub,*xsup; doublecomplex *lusup; #if ( PRNTlevel>= 2 ) double dmin, dsum, dprod; #endif LUstruct->dt = 'z'; /* Structures needed for parallel symbolic factorization */ int_t *sizes, *fstVtxSep, parSymbFact; int noDomains, nprocs_num; MPI_Comm symb_comm; /* communicator for symbolic factorization */ int col, key; /* parameters for creating a new communicator */ Pslu_freeable_t Pslu_freeable; float flinfo; /* Initialization. 
*/ m = A->nrow; n = A->ncol; Astore = (NRformat_loc *) A->Store; nnz_loc = Astore->nnz_loc; m_loc = Astore->m_loc; fst_row = Astore->fst_row; a = (doublecomplex *) Astore->nzval; rowptr = Astore->rowptr; colind = Astore->colind; sizes = NULL; fstVtxSep = NULL; symb_comm = MPI_COMM_NULL; num_mem_usage.for_lu = num_mem_usage.total = 0.0; symb_mem_usage.for_lu = symb_mem_usage.total = 0.0; /* Test the input parameters. */ *info = 0; Fact = options->Fact; if ( Fact < 0 || Fact > FACTORED ) *info = -1; else if ( options->RowPerm < 0 || options->RowPerm > MY_PERMR ) *info = -1; else if ( options->ColPerm < 0 || options->ColPerm > MY_PERMC ) *info = -1; else if ( options->IterRefine < 0 || options->IterRefine > SLU_EXTRA ) *info = -1; else if ( options->IterRefine == SLU_EXTRA ) { *info = -1; printf("ERROR: Extra precise iterative refinement yet to support.\n"); } else if ( A->nrow != A->ncol || A->nrow < 0 || A->Stype != SLU_NR_loc || A->Dtype != SLU_Z || A->Mtype != SLU_GE ) *info = -2; else if ( ldb < m_loc ) *info = -5; else if ( nrhs < 0 ) *info = -6; if ( sp_ienv_dist(2) > sp_ienv_dist(3) ) { *info = 1; printf("ERROR: Relaxation (NREL) cannot be larger than max. supernode size (NSUP).\n" "\t-> Check parameter setting in sp_ienv_dist.c to correct error.\n"); } if ( *info ) { i = -(*info); pxerr_dist("pzgssvx", grid, -*info); return; } factored = (Fact == FACTORED); Equil = (!factored && options->Equil == YES); notran = (options->Trans == NOTRANS); parSymbFact = options->ParSymbFact; iam = grid->iam; job = 5; if ( factored || (Fact == SamePattern_SameRowPerm && Equil) ) { rowequ = (ScalePermstruct->DiagScale == ROW) || (ScalePermstruct->DiagScale == BOTH); colequ = (ScalePermstruct->DiagScale == COL) || (ScalePermstruct->DiagScale == BOTH); } else rowequ = colequ = FALSE; /* The following arrays are replicated on all processes. 
*/ perm_r = ScalePermstruct->perm_r; perm_c = ScalePermstruct->perm_c; etree = LUstruct->etree; R = ScalePermstruct->R; C = ScalePermstruct->C; /********/ #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Enter pzgssvx()"); #endif /* Not factored & ask for equilibration */ if ( Equil && Fact != SamePattern_SameRowPerm ) { /* Allocate storage if not done so before. */ switch ( ScalePermstruct->DiagScale ) { case NOEQUIL: if ( !(R = (double *) doubleMalloc_dist(m)) ) ABORT("Malloc fails for R[]."); if ( !(C = (double *) doubleMalloc_dist(n)) ) ABORT("Malloc fails for C[]."); ScalePermstruct->R = R; ScalePermstruct->C = C; break; case ROW: if ( !(C = (double *) doubleMalloc_dist(n)) ) ABORT("Malloc fails for C[]."); ScalePermstruct->C = C; break; case COL: if ( !(R = (double *) doubleMalloc_dist(m)) ) ABORT("Malloc fails for R[]."); ScalePermstruct->R = R; break; } } /* ------------------------------------------------------------ * Diagonal scaling to equilibrate the matrix. (simple scheme) * for row i = 1:n, A(i,:) <- A(i,:) / max(abs(A(i,:)); * for column j = 1:n, A(:,j) <- A(:, j) / max(abs(A(:,j)) * ------------------------------------------------------------*/ if ( Equil ) { #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Enter equil"); #endif t = SuperLU_timer_(); if ( Fact == SamePattern_SameRowPerm ) { /* Reuse R and C. 
*/ switch ( ScalePermstruct->DiagScale ) { case NOEQUIL: break; case ROW: irow = fst_row; for (j = 0; j < m_loc; ++j) { for (i = rowptr[j]; i < rowptr[j+1]; ++i) { zd_mult(&a[i], &a[i], R[irow]); /* Scale rows */ } ++irow; } break; case COL: for (j = 0; j < m_loc; ++j) for (i = rowptr[j]; i < rowptr[j+1]; ++i){ icol = colind[i]; zd_mult(&a[i], &a[i], C[icol]); /* Scale columns */ } break; case BOTH: irow = fst_row; for (j = 0; j < m_loc; ++j) { for (i = rowptr[j]; i < rowptr[j+1]; ++i) { icol = colind[i]; zd_mult(&a[i], &a[i], R[irow]); /* Scale rows */ zd_mult(&a[i], &a[i], C[icol]); /* Scale columns */ } ++irow; } break; } } else { /* Compute R & C from scratch */ /* Compute the row and column scalings. */ pzgsequ(A, R, C, &rowcnd, &colcnd, &amax, &iinfo, grid); if ( iinfo > 0 ) { if ( iinfo <= m ) { #if ( PRNTlevel>=1 ) fprintf(stderr, "The " IFMT "-th row of A is exactly zero\n", iinfo); #endif } else { #if ( PRNTlevel>=1 ) fprintf(stderr, "The " IFMT "-th column of A is exactly zero\n", iinfo-n); #endif } } else if ( iinfo < 0 ) return; /* Now iinfo == 0 */ /* Equilibrate matrix A if it is badly-scaled. A <-- diag(R)*A*diag(C) */ pzlaqgs(A, R, C, rowcnd, colcnd, amax, equed); if ( strncmp(equed, "R", 1)==0 ) { ScalePermstruct->DiagScale = ROW; rowequ = ROW; } else if ( strncmp(equed, "C", 1)==0 ) { ScalePermstruct->DiagScale = COL; colequ = COL; } else if ( strncmp(equed, "B", 1)==0 ) { ScalePermstruct->DiagScale = BOTH; rowequ = ROW; colequ = COL; } else ScalePermstruct->DiagScale = NOEQUIL; #if ( PRNTlevel>=1 ) if ( !iam ) { printf(".. equilibrated? *equed = %c\n", *equed); fflush(stdout); } #endif } /* end if Fact ... */ stat->utime[EQUIL] = SuperLU_timer_() - t; #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Exit equil"); #endif } /* end if Equil ... LAPACK style, not involving MC64 */ if ( !factored ) { /* Skip this if already factored. 
*/ /* * For serial symbolic factorization, gather A from the distributed * compressed row format to global A in compressed column format. * Numerical values are gathered only when a row permutation * for large diagonal is sought after. */ if ( Fact != SamePattern_SameRowPerm && (parSymbFact == NO || options->RowPerm != NO) ) { /* Performs serial symbolic factorzation and/or MC64 */ need_value = (options->RowPerm == LargeDiag_MC64); pzCompRow_loc_to_CompCol_global(need_value, A, grid, &GA); GAstore = (NCformat *) GA.Store; colptr = GAstore->colptr; rowind = GAstore->rowind; nnz = GAstore->nnz; GA_mem_use = (nnz + n + 1) * sizeof(int_t); if ( need_value ) { a_GA = (doublecomplex *) GAstore->nzval; GA_mem_use += nnz * sizeof(doublecomplex); } else assert(GAstore->nzval == NULL); } /* ------------------------------------------------------------ Find the row permutation Pr for A, and apply Pr*[GA]. GA is overwritten by Pr*[GA]. ------------------------------------------------------------*/ if ( options->RowPerm != NO ) { t = SuperLU_timer_(); if ( Fact != SamePattern_SameRowPerm ) { if ( options->RowPerm == MY_PERMR ) { /* Use user's perm_r. */ /* Permute the global matrix GA for symbfact() */ for (i = 0; i < colptr[n]; ++i) { irow = rowind[i]; rowind[i] = perm_r[irow]; } } else if ( options->RowPerm == LargeDiag_MC64 ) { /* Get a new perm_r[] from MC64 */ if ( job == 5 ) { /* Allocate storage for scaling factors. 
*/ if ( !(R1 = doubleMalloc_dist(m)) ) ABORT("SUPERLU_MALLOC fails for R1[]"); if ( !(C1 = doubleMalloc_dist(n)) ) ABORT("SUPERLU_MALLOC fails for C1[]"); } if ( !iam ) { /* Process 0 finds a row permutation */ iinfo = zldperm_dist(job, m, nnz, colptr, rowind, a_GA, perm_r, R1, C1); MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm ); if ( iinfo == 0 ) { MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm ); if ( job == 5 && Equil ) { MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm ); MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm ); } } } else { MPI_Bcast( &iinfo, 1, mpi_int_t, 0, grid->comm ); if ( iinfo == 0 ) { MPI_Bcast( perm_r, m, mpi_int_t, 0, grid->comm ); if ( job == 5 && Equil ) { MPI_Bcast( R1, m, MPI_DOUBLE, 0, grid->comm ); MPI_Bcast( C1, n, MPI_DOUBLE, 0, grid->comm ); } } } if ( iinfo && job == 5) { /* Error return */ SUPERLU_FREE(R1); SUPERLU_FREE(C1); } #if ( PRNTlevel>=2 ) dmin = dmach_dist("Overflow"); dsum = 0.0; dprod = 1.0; #endif if ( iinfo == 0 ) { if ( job == 5 ) { if ( Equil ) { for (i = 0; i < n; ++i) { R1[i] = exp(R1[i]); C1[i] = exp(C1[i]); } /* Scale the distributed matrix further. A <-- diag(R1)*A*diag(C1) */ irow = fst_row; for (j = 0; j < m_loc; ++j) { for (i = rowptr[j]; i < rowptr[j+1]; ++i) { icol = colind[i]; zd_mult(&a[i], &a[i], R1[irow]); zd_mult(&a[i], &a[i], C1[icol]); #if ( PRNTlevel>=2 ) if ( perm_r[irow] == icol ) { /* New diagonal */ if ( job == 2 || job == 3 ) dmin = SUPERLU_MIN(dmin, slud_z_abs1(&a[i])); else if ( job == 4 ) dsum += slud_z_abs1(&a[i]); else if ( job == 5 ) dprod *= slud_z_abs1(&a[i]); } #endif } ++irow; } /* Multiply together the scaling factors -- R/C from simple scheme, R1/C1 from MC64. 
*/ if ( rowequ ) for (i = 0; i < m; ++i) R[i] *= R1[i]; else for (i = 0; i < m; ++i) R[i] = R1[i]; if ( colequ ) for (i = 0; i < n; ++i) C[i] *= C1[i]; else for (i = 0; i < n; ++i) C[i] = C1[i]; ScalePermstruct->DiagScale = BOTH; rowequ = colequ = 1; } /* end Equil */ /* Now permute global GA to prepare for symbfact() */ for (j = 0; j < n; ++j) { for (i = colptr[j]; i < colptr[j+1]; ++i) { irow = rowind[i]; rowind[i] = perm_r[irow]; } } SUPERLU_FREE (R1); SUPERLU_FREE (C1); } else { /* job = 2,3,4 */ for (j = 0; j < n; ++j) { for (i = colptr[j]; i < colptr[j+1]; ++i) { irow = rowind[i]; rowind[i] = perm_r[irow]; } /* end for i ... */ } /* end for j ... */ } /* end else job ... */ } else { /* if iinfo != 0 */ for (i = 0; i < m; ++i) perm_r[i] = i; } #if ( PRNTlevel>=2 ) if ( job == 2 || job == 3 ) { if ( !iam ) printf("\tsmallest diagonal %e\n", dmin); } else if ( job == 4 ) { if ( !iam ) printf("\tsum of diagonal %e\n", dsum); } else if ( job == 5 ) { if ( !iam ) printf("\t product of diagonal %e\n", dprod); } #endif } else { /* use largeDiag_AWPM */ #ifdef HAVE_COMBBLAS c2cpp_GetAWPM(A, grid, ScalePermstruct); #else if ( iam == 0 ) { printf("CombBLAS is not available\n"); fflush(stdout); } #endif } /* end if options->RowPerm ... */ t = SuperLU_timer_() - t; stat->utime[ROWPERM] = t; #if ( PRNTlevel>=1 ) if ( !iam ) { printf(".. LDPERM job " IFMT "\t time: %.2f\n", job, t); fflush(stdout); } #endif } /* end if Fact ... */ } else { /* options->RowPerm == NOROWPERM / NATURAL */ for (i = 0; i < m; ++i) perm_r[i] = i; } #if ( DEBUGlevel>=2 ) if ( !iam ) PrintInt10("perm_r", m, perm_r); #endif } /* end if (!factored) */ if ( !factored || options->IterRefine ) { /* Compute norm(A), which will be used to adjust small diagonal. */ if ( notran ) *(unsigned char *)norm = '1'; else *(unsigned char *)norm = 'I'; anorm = pzlangs(norm, A, grid); #if ( PRNTlevel>=1 ) if ( !iam ) { printf(".. 
anorm %e\n", anorm); fflush(stdout); } #endif } /* ------------------------------------------------------------ Perform the LU factorization: symbolic factorization, redistribution, and numerical factorization. ------------------------------------------------------------*/ if ( !factored ) { t = SuperLU_timer_(); /* * Get column permutation vector perm_c[], according to permc_spec: * permc_spec = NATURAL: natural ordering * permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A * permc_spec = MMD_ATA: minimum degree on structure of A'*A * permc_spec = METIS_AT_PLUS_A: METIS on structure of A'+A * permc_spec = PARMETIS: parallel METIS on structure of A'+A * permc_spec = MY_PERMC: the ordering already supplied in perm_c[] */ permc_spec = options->ColPerm; if ( parSymbFact == YES || permc_spec == PARMETIS ) { nprocs_num = grid->nprow * grid->npcol; noDomains = (int) ( pow(2, ((int) LOG2( nprocs_num )))); /* create a new communicator for the first noDomains processes in grid->comm */ key = iam; if (iam < noDomains) col = 0; else col = MPI_UNDEFINED; MPI_Comm_split (grid->comm, col, key, &symb_comm ); if ( permc_spec == NATURAL || permc_spec == MY_PERMC ) { if ( permc_spec == NATURAL ) { for (j = 0; j < n; ++j) perm_c[j] = j; } if ( !(sizes = intMalloc_dist(2 * noDomains)) ) ABORT("SUPERLU_MALLOC fails for sizes."); if ( !(fstVtxSep = intMalloc_dist(2 * noDomains)) ) ABORT("SUPERLU_MALLOC fails for fstVtxSep."); for (i = 0; i < 2*noDomains - 2; ++i) { sizes[i] = 0; fstVtxSep[i] = 0; } sizes[2*noDomains - 2] = m; fstVtxSep[2*noDomains - 2] = 0; } else if ( permc_spec != PARMETIS ) { /* same as before */ printf("{" IFMT "," IFMT "}: pzgssvx: invalid ColPerm option when ParSymbfact is used\n", MYROW(grid->iam, grid), MYCOL(grid->iam, grid)); } } if ( permc_spec != MY_PERMC && Fact == DOFACT ) { /* Reuse perm_c if Fact == SamePattern, or SamePattern_SameRowPerm */ if ( permc_spec == PARMETIS ) { // #pragma omp parallel // { // #pragma omp master // { /* Get column 
permutation vector in perm_c. * * This routine takes as input the distributed input matrix A * * and does not modify it. It also allocates memory for * * sizes[] and fstVtxSep[] arrays, that contain information * * on the separator tree computed by ParMETIS. */ flinfo = get_perm_c_parmetis(A, perm_r, perm_c, nprocs_num, noDomains, &sizes, &fstVtxSep, grid, &symb_comm); // } // } if (flinfo > 0) { #if ( PRNTlevel>=1 ) fprintf(stderr, "Insufficient memory for get_perm_c parmetis\n"); #endif *info = flinfo; return; } } else { get_perm_c_dist(iam, permc_spec, &GA, perm_c); } } stat->utime[COLPERM] = SuperLU_timer_() - t; /* Symbolic factorization. */ if ( Fact != SamePattern_SameRowPerm ) { if ( parSymbFact == NO ) { /* Perform serial symbolic factorization */ /* GA = Pr*A, perm_r[] is already applied. */ int_t *GACcolbeg, *GACcolend, *GACrowind; /* Compute the elimination tree of Pc*(A^T+A)*Pc^T or Pc*A^T*A*Pc^T (a.k.a. column etree), depending on the choice of ColPerm. Adjust perm_c[] to be consistent with a postorder of etree. Permute columns of A to form A*Pc'. After this routine, GAC = GA*Pc^T. */ sp_colorder(options, &GA, perm_c, etree, &GAC); /* Form Pc*A*Pc^T to preserve the diagonal of the matrix GAC. */ GACstore = (NCPformat *) GAC.Store; GACcolbeg = GACstore->colbeg; GACcolend = GACstore->colend; GACrowind = GACstore->rowind; for (j = 0; j < n; ++j) { for (i = GACcolbeg[j]; i < GACcolend[j]; ++i) { irow = GACrowind[i]; GACrowind[i] = perm_c[irow]; } } /* Perform a symbolic factorization on Pc*Pr*A*Pc^T and set up the nonzero data structures for L & U. */ #if ( PRNTlevel>=1 ) if ( !iam ) { printf(".. symbfact(): relax " IFMT ", maxsuper " IFMT ", fill " IFMT "\n", sp_ienv_dist(2), sp_ienv_dist(3), sp_ienv_dist(6)); fflush(stdout); } #endif t = SuperLU_timer_(); if ( !(Glu_freeable = (Glu_freeable_t *) SUPERLU_MALLOC(sizeof(Glu_freeable_t))) ) ABORT("Malloc fails for Glu_freeable."); /* Every process does this. 
*/ iinfo = symbfact(options, iam, &GAC, perm_c, etree, Glu_persist, Glu_freeable); nnzLU = Glu_freeable->nnzLU; stat->utime[SYMBFAC] = SuperLU_timer_() - t; if ( iinfo <= 0 ) { /* Successful return */ QuerySpace_dist(n, -iinfo, Glu_freeable, &symb_mem_usage); #if ( PRNTlevel>=1 ) if ( !iam ) { printf("\tNo of supers " IFMT "\n", Glu_persist->supno[n-1]+1); printf("\tSize of G(L) " IFMT "\n", Glu_freeable->xlsub[n]); printf("\tSize of G(U) " IFMT "\n", Glu_freeable->xusub[n]); printf("\tint %d, short %d, float %d, double %d\n", (int) sizeof(int_t), (int) sizeof(short), (int) sizeof(float), (int) sizeof(double)); printf("\tSYMBfact (MB):\tL\\U %.2f\ttotal %.2f\texpansions " IFMT "\n", symb_mem_usage.for_lu*1e-6, symb_mem_usage.total*1e-6, symb_mem_usage.expansions); fflush(stdout); } #endif } else { /* symbfact out of memory */ #if ( PRNTlevel>=1 ) if ( !iam ) fprintf(stderr,"symbfact() error returns " IFMT "\n",iinfo); #endif *info = iinfo; return; } } /* end serial symbolic factorization */ else { /* parallel symbolic factorization */ t = SuperLU_timer_(); flinfo = symbfact_dist(nprocs_num, noDomains, A, perm_c, perm_r, sizes, fstVtxSep, &Pslu_freeable, &(grid->comm), &symb_comm, &symb_mem_usage); nnzLU = Pslu_freeable.nnzLU; stat->utime[SYMBFAC] = SuperLU_timer_() - t; if (flinfo > 0) { #if ( PRNTlevel>=1 ) fprintf(stderr, "Insufficient memory for parallel symbolic factorization."); #endif *info = flinfo; return; } } /* Destroy global GA */ if ( parSymbFact == NO || options->RowPerm != NO ) Destroy_CompCol_Matrix_dist(&GA); if ( parSymbFact == NO ) Destroy_CompCol_Permuted_dist(&GAC); } /* end if Fact != SamePattern_SameRowPerm ... */ if (sizes) SUPERLU_FREE (sizes); if (fstVtxSep) SUPERLU_FREE (fstVtxSep); if (symb_comm != MPI_COMM_NULL) MPI_Comm_free (&symb_comm); /* Distribute entries of A into L & U data structures. */ //if (parSymbFact == NO || ???? 
Fact == SamePattern_SameRowPerm) { if ( parSymbFact == NO ) { /* CASE OF SERIAL SYMBOLIC */ /* Apply column permutation to the original distributed A */ for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]]; /* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc^T into L and U storage. NOTE: the row permutation Pc*Pr is applied internally in the distribution routine. */ t = SuperLU_timer_(); dist_mem_use = pzdistribute(Fact, n, A, ScalePermstruct, Glu_freeable, LUstruct, grid); stat->utime[DIST] = SuperLU_timer_() - t; /* Deallocate storage used in symbolic factorization. */ if ( Fact != SamePattern_SameRowPerm ) { iinfo = symbfact_SubFree(Glu_freeable); SUPERLU_FREE(Glu_freeable); } } else { /* CASE OF PARALLEL SYMBOLIC */ /* Distribute Pc*Pr*diag(R)*A*diag(C)*Pc' into L and U storage. NOTE: the row permutation Pc*Pr is applied internally in the distribution routine. */ /* Apply column permutation to the original distributed A */ for (j = 0; j < nnz_loc; ++j) colind[j] = perm_c[colind[j]]; t = SuperLU_timer_(); dist_mem_use = zdist_psymbtonum(Fact, n, A, ScalePermstruct, &Pslu_freeable, LUstruct, grid); if (dist_mem_use > 0) ABORT ("Not enough memory available for dist_psymbtonum\n"); stat->utime[DIST] = SuperLU_timer_() - t; } /*if (!iam) printf ("\tDISTRIBUTE time %8.2f\n", stat->utime[DIST]);*/ /* Perform numerical factorization in parallel. */ t = SuperLU_timer_(); // #pragma omp parallel // { // #pragma omp master // { pzgstrf(options, m, n, anorm, LUstruct, grid, stat, info); stat->utime[FACT] = SuperLU_timer_() - t; // } // } #if ( PRNTlevel>=2 ) /* ------------------------------------------------------------ SUM OVER ALL ENTRIES OF A AND PRINT NNZ AND SIZE OF A. 
------------------------------------------------------------*/ Astore = (NRformat_loc *) A->Store; xsup = Glu_persist->xsup; nzval_a = Astore->nzval; asum.r=0.0; asum.i=0.0; for (i = 0; i < Astore->m_loc; ++i) { for (j = Astore->rowptr[i]; j < Astore->rowptr[i+1]; ++j) { z_add(&asum,&asum,&nzval_a[j]); } } nsupers = Glu_persist->supno[n-1] + 1; nsupers_j = CEILING( nsupers, grid->npcol ); /* Number of local block columns */ lsum.r=0.0; lsum.i=0.0; for (lk=0;lk<nsupers_j;++lk){ lsub = LUstruct->Llu->Lrowind_bc_ptr[lk]; lusup = LUstruct->Llu->Lnzval_bc_ptr[lk]; if(lsub){ k = MYCOL(grid->iam, grid)+lk*grid->npcol; /* not sure */ knsupc = SuperSize( k ); nsupr = lsub[1]; for (j=0; j<knsupc; ++j) for (i = 0; i < nsupr; ++i) z_add(&lsum,&lsum,&lusup[j*nsupr+i]); } } MPI_Allreduce( &(asum.r), &(asum_tot.r),1, MPI_DOUBLE, MPI_SUM, grid->comm ); MPI_Allreduce( &(asum.i), &(asum_tot.i),1, MPI_DOUBLE, MPI_SUM, grid->comm ); MPI_Allreduce( &(lsum.r), &(lsum_tot.r),1, MPI_DOUBLE, MPI_SUM, grid->comm ); MPI_Allreduce( &(lsum.i), &(lsum_tot.i),1, MPI_DOUBLE, MPI_SUM, grid->comm ); MPI_Allreduce( &Astore->rowptr[Astore->m_loc], &nnz_tot,1, mpi_int_t, MPI_SUM, grid->comm ); // MPI_Bcast( &nnzLU, 1, mpi_int_t, 0, grid->comm ); MPI_Comm_rank( MPI_COMM_WORLD, &iam_g ); if (!iam_g) { print_options_dist(options); fflush(stdout); } printf(".. 
Ainfo mygid %5d mysid %5d nnz_loc " IFMT " sum_loc %e lsum_loc %e nnz "IFMT " nnzLU %ld sum %e lsum %e N "IFMT "\n", iam_g,iam,Astore->rowptr[Astore->m_loc],asum.r+asum.i, lsum.r+lsum.i, nnz_tot,nnzLU,asum_tot.r+asum_tot.i,lsum_tot.r+lsum_tot.i,A->ncol); fflush(stdout); #endif #if 0 // #ifdef GPU_PROF // if(!iam ) // { // char* ttemp; // ttemp = getenv("IO_FILE"); // if(ttemp!=NULL) // { // printf("File being opend is %s\n",ttemp ); // FILE* fp; // fp = fopen(ttemp,"w"); // if(!fp) // { // fprintf(stderr," Couldn't open output file %s\n",ttemp); // } // int nsup=Glu_persist->supno[n-1]+1; // int ii; // for (ii = 0; ii < nsup; ++ii) // { // fprintf(fp,"%d,%d,%d,%d,%d,%d\n",gs1.mnk_min_stats[ii],gs1.mnk_min_stats[ii+nsup], // gs1.mnk_min_stats[ii+2*nsup], // gs1.mnk_max_stats[ii],gs1.mnk_max_stats[ii+nsup],gs1.mnk_max_stats[ii+2*nsup]); // } // // lastly put the timeing stats that we need // fprintf(fp,"Min %lf Max %lf totaltime %lf \n",gs1.osDgemmMin,gs1.osDgemmMax,stat->utime[FACT]); // fclose(fp); // } // } // #endif #endif if ( options->PrintStat ) { int_t TinyPivots; float for_lu, total, max, avg, temp; zQuerySpace_dist(n, LUstruct, grid, stat, &num_mem_usage); if (parSymbFact == TRUE) { /* The memory used in the redistribution routine includes the memory used for storing the symbolic structure and the memory allocated for numerical factorization */ temp = SUPERLU_MAX(symb_mem_usage.total, -dist_mem_use); if ( options->RowPerm != NO ) temp = SUPERLU_MAX(temp, GA_mem_use); } else { temp = SUPERLU_MAX ( symb_mem_usage.total + GA_mem_use, /* symbfact step */ symb_mem_usage.for_lu + dist_mem_use + num_mem_usage.for_lu /* distribution step */ ); } temp = SUPERLU_MAX(temp, num_mem_usage.total); MPI_Reduce( &temp, &max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm ); MPI_Reduce( &temp, &avg, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm ); MPI_Allreduce( &stat->TinyPivots, &TinyPivots, 1, mpi_int_t, MPI_SUM, grid->comm ); stat->TinyPivots = TinyPivots; MPI_Reduce( 
&num_mem_usage.for_lu, &for_lu, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm ); MPI_Reduce( &num_mem_usage.total, &total, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm ); if (!iam) { printf("\n** Memory Usage **********************************\n"); printf("** NUMfact space (MB): (sum-of-all-processes)\n" " L\\U : %8.2f | Total : %8.2f\n", for_lu * 1e-6, total * 1e-6); printf("** Total highmark (MB):\n" " Sum-of-all : %8.2f | Avg : %8.2f | Max : %8.2f\n", avg * 1e-6, avg / grid->nprow / grid->npcol * 1e-6, max * 1e-6); printf("**************************************************\n"); fflush(stdout); } } /* end printing stats */ } /* end if (!factored) */ if ( options->Fact == DOFACT || options->Fact == SamePattern ) { /* Need to reset the solve's communication pattern, because perm_r[] and/or perm_c[] is changed. */ if ( options->SolveInitialized == YES ) { /* Initialized before */ zSolveFinalize(options, SOLVEstruct); /* Clean up structure */ options->SolveInitialized = NO; /* Reset the solve state */ } } #if 0 /* Need to revisit: Why the following is not good enough for X-to-B distribution -- inv_perm_c changed */ pxgstrs_finalize(SOLVEstruct->gstrs_comm); pxgstrs_init(A->ncol, m_loc, nrhs, fst_row, perm_r, perm_c, grid, LUstruct->Glu_persist, SOLVEstruct); #endif /* ------------------------------------------------------------ Compute the solution matrix X. ------------------------------------------------------------*/ if ( nrhs && *info == 0 ) { if ( !(b_work = doublecomplexMalloc_dist(n)) ) ABORT("Malloc fails for b_work[]"); /* ------------------------------------------------------------ Scale the right-hand side if equilibration was performed. 
------------------------------------------------------------*/ if ( notran ) { if ( rowequ ) { b_col = B; for (j = 0; j < nrhs; ++j) { irow = fst_row; for (i = 0; i < m_loc; ++i) { zd_mult(&b_col[i], &b_col[i], R[irow]); ++irow; } b_col += ldb; } } } else if ( colequ ) { b_col = B; for (j = 0; j < nrhs; ++j) { irow = fst_row; for (i = 0; i < m_loc; ++i) { zd_mult(&b_col[i], &b_col[i], C[irow]); ++irow; } b_col += ldb; } } /* Save a copy of the right-hand side. */ ldx = ldb; if ( !(X = doublecomplexMalloc_dist(((size_t)ldx) * nrhs)) ) ABORT("Malloc fails for X[]"); x_col = X; b_col = B; for (j = 0; j < nrhs; ++j) { #if 0 /* Sherry */ for (i = 0; i < m_loc; ++i) x_col[i] = b_col[i]; #endif memcpy(x_col, b_col, m_loc * sizeof(doublecomplex)); x_col += ldx; b_col += ldb; } /* ------------------------------------------------------------ Solve the linear system. ------------------------------------------------------------*/ if ( options->SolveInitialized == NO ) { /* First time */ zSolveInit(options, A, perm_r, perm_c, nrhs, LUstruct, grid, SOLVEstruct); /* Inside this routine, SolveInitialized is set to YES. For repeated call to pzgssvx(), no need to re-initialilze the Solve data & communication structures, unless a new factorization with Fact == DOFACT or SamePattern is asked for. */ } if ( options->DiagInv==YES && (options->SolveInitialized == NO || Fact == SamePattern || Fact == SamePattern_SameRowPerm) ) { pzCompute_Diag_Inv(n, LUstruct, grid, stat, info); } // #pragma omp parallel // { // #pragma omp master // { pzgstrs(n, LUstruct, ScalePermstruct, grid, X, m_loc, fst_row, ldb, nrhs, SOLVEstruct, stat, info); // } // } /* ------------------------------------------------------------ Use iterative refinement to improve the computed solution and compute error bounds and backward error estimates for it. ------------------------------------------------------------*/ if ( options->IterRefine ) { /* Improve the solution by iterative refinement. 
*/ int_t *it; int_t *colind_gsmv = SOLVEstruct->A_colind_gsmv; /* This was allocated and set to NULL in zSolveInit() */ SOLVEstruct_t *SOLVEstruct1; /* Used by refinement. */ t = SuperLU_timer_(); if ( options->RefineInitialized == NO || Fact == DOFACT ) { /* All these cases need to re-initialize gsmv structure */ if ( options->RefineInitialized ) pzgsmv_finalize(SOLVEstruct->gsmv_comm); pzgsmv_init(A, SOLVEstruct->row_to_proc, grid, SOLVEstruct->gsmv_comm); /* Save a copy of the transformed local col indices in colind_gsmv[]. */ if ( colind_gsmv ) SUPERLU_FREE(colind_gsmv); if ( !(it = intMalloc_dist(nnz_loc)) ) ABORT("Malloc fails for colind_gsmv[]"); colind_gsmv = SOLVEstruct->A_colind_gsmv = it; for (i = 0; i < nnz_loc; ++i) colind_gsmv[i] = colind[i]; options->RefineInitialized = YES; } else if ( Fact == SamePattern || Fact == SamePattern_SameRowPerm ) { doublecomplex atemp; int_t k, jcol, p; /* Swap to beginning the part of A corresponding to the local part of X, as was done in pzgsmv_init() */ for (i = 0; i < m_loc; ++i) { /* Loop through each row */ k = rowptr[i]; for (j = rowptr[i]; j < rowptr[i+1]; ++j) { jcol = colind[j]; p = SOLVEstruct->row_to_proc[jcol]; if ( p == iam ) { /* Local */ atemp = a[k]; a[k] = a[j]; a[j] = atemp; ++k; } } } /* Re-use the local col indices of A obtained from the previous call to pzgsmv_init() */ for (i = 0; i < nnz_loc; ++i) colind[i] = colind_gsmv[i]; } if ( nrhs == 1 ) { /* Use the existing solve structure */ SOLVEstruct1 = SOLVEstruct; } else { /* For nrhs > 1, since refinement is performed for RHS one at a time, the communication structure for pdgstrs is different than the solve with nrhs RHS. So we use SOLVEstruct1 for the refinement step. 
*/ if ( !(SOLVEstruct1 = (SOLVEstruct_t *) SUPERLU_MALLOC(sizeof(SOLVEstruct_t))) ) ABORT("Malloc fails for SOLVEstruct1"); /* Copy the same stuff */ SOLVEstruct1->row_to_proc = SOLVEstruct->row_to_proc; SOLVEstruct1->inv_perm_c = SOLVEstruct->inv_perm_c; SOLVEstruct1->num_diag_procs = SOLVEstruct->num_diag_procs; SOLVEstruct1->diag_procs = SOLVEstruct->diag_procs; SOLVEstruct1->diag_len = SOLVEstruct->diag_len; SOLVEstruct1->gsmv_comm = SOLVEstruct->gsmv_comm; SOLVEstruct1->A_colind_gsmv = SOLVEstruct->A_colind_gsmv; /* Initialize the *gstrs_comm for 1 RHS. */ if ( !(SOLVEstruct1->gstrs_comm = (pxgstrs_comm_t *) SUPERLU_MALLOC(sizeof(pxgstrs_comm_t))) ) ABORT("Malloc fails for gstrs_comm[]"); pxgstrs_init(n, m_loc, 1, fst_row, perm_r, perm_c, grid, Glu_persist, SOLVEstruct1); } pzgsrfs(n, A, anorm, LUstruct, ScalePermstruct, grid, B, ldb, X, ldx, nrhs, SOLVEstruct1, berr, stat, info); /* Deallocate the storage associated with SOLVEstruct1 */ if ( nrhs > 1 ) { pxgstrs_finalize(SOLVEstruct1->gstrs_comm); SUPERLU_FREE(SOLVEstruct1); } stat->utime[REFINE] = SuperLU_timer_() - t; } /* end if IterRefine */ /* Permute the solution matrix B <= Pc'*X. */ pzPermute_Dense_Matrix(fst_row, m_loc, SOLVEstruct->row_to_proc, SOLVEstruct->inv_perm_c, X, ldx, B, ldb, nrhs, grid); #if ( DEBUGlevel>=2 ) printf("\n (%d) .. After pzPermute_Dense_Matrix(): b =\n", iam); for (i = 0; i < m_loc; ++i) printf("\t(%d)\t%4d\t%.10f\n", iam, i+fst_row, B[i]); #endif /* Transform the solution matrix X to a solution of the original system before equilibration. 
*/ if ( notran ) { if ( colequ ) { b_col = B; for (j = 0; j < nrhs; ++j) { irow = fst_row; for (i = 0; i < m_loc; ++i) { zd_mult(&b_col[i], &b_col[i], C[irow]); ++irow; } b_col += ldb; } } } else if ( rowequ ) { b_col = B; for (j = 0; j < nrhs; ++j) { irow = fst_row; for (i = 0; i < m_loc; ++i) { zd_mult(&b_col[i], &b_col[i], R[irow]); ++irow; } b_col += ldb; } } SUPERLU_FREE(b_work); SUPERLU_FREE(X); } /* end if nrhs != 0 && *info == 0 */ #if ( PRNTlevel>=1 ) if ( !iam ) printf(".. DiagScale = %d\n", ScalePermstruct->DiagScale); #endif /* Deallocate R and/or C if it was not used. */ if ( Equil && Fact != SamePattern_SameRowPerm ) { switch ( ScalePermstruct->DiagScale ) { case NOEQUIL: SUPERLU_FREE(R); SUPERLU_FREE(C); break; case ROW: SUPERLU_FREE(C); break; case COL: SUPERLU_FREE(R); break; } } #if 0 if ( !factored && Fact != SamePattern_SameRowPerm && !parSymbFact) Destroy_CompCol_Permuted_dist(&GAC); #endif #if ( DEBUGlevel>=1 ) CHECK_MALLOC(iam, "Exit pzgssvx()"); #endif }
soxr.c
/* SoX Resampler Library Copyright (c) 2007-13 robs@users.sourceforge.net * Licence for this file: LGPL v2.1 See LICENCE for details. */ #include <math.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "soxr.h" #include "data-io.h" #include "internal.h" char const * soxr_version(void) { return "libsoxr-" SOXR_THIS_VERSION_STR; } typedef void sample_t; /* float or double */ typedef void (* fn_t)(void); typedef fn_t control_block_t[10]; #define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0]) #define resampler_process (*(void (*)(void *, size_t))p->control_block[1]) #define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2]) #define resampler_flush (*(void (*)(void *))p->control_block[3]) #define resampler_close (*(void (*)(void *))p->control_block[4]) #define resampler_delay (*(double (*)(void *))p->control_block[5]) #define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6]) #define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7]) #define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8]) #define resampler_id (*(char const * (*)(void))p->control_block[9]) typedef void * resampler_t; /* For one channel. */ typedef void * resampler_shared_t; /* Between channels. 
*/ typedef void (* deinterleave_t)(sample_t * * dest, soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch); typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest, sample_t const * const * src, size_t, unsigned, unsigned long *); struct soxr { unsigned num_channels; double io_ratio; soxr_error_t error; soxr_quality_spec_t q_spec; soxr_io_spec_t io_spec; soxr_runtime_spec_t runtime_spec; void * input_fn_state; soxr_input_fn_t input_fn; size_t max_ilen; resampler_shared_t shared; resampler_t * resamplers; control_block_t control_block; deinterleave_t deinterleave; interleave_t interleave; void * * channel_ptrs; size_t clips; unsigned long seed; int flushing; }; /* TODO: these should not be here. */ #define TO_3dB(a) ((1.6e-6*a-7.5e-4)*a+.646) #define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */ soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags) { soxr_quality_spec_t spec, * p = &spec; unsigned quality = recipe & 0xf; double rej; memset(p, 0, sizeof(*p)); if (quality > 13) { p->e = "invalid quality type"; return spec; } if (quality == 13) quality = 6; else if (quality > 10) quality = 0; p->phase_response = "\62\31\144"[(recipe & 0x30)>>8]; p->stopband_begin = 1; p->precision = !quality? 0: quality < 3? 16 : quality < 8? 4 + quality * 4 : 55 - quality * 4; rej = p->precision * linear_to_dB(2.); p->flags = flags; if (quality < 8) { p->passband_end = quality == 1? 
LOW_Q_BW0 : 1 - .05 / TO_3dB(rej); if (quality <= 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } else { static float const bw[] = {.931f, .832f, .663f}; p->passband_end = bw[quality - 8]; if (quality - 8 == 2) p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM; } if (recipe & SOXR_STEEP_FILTER) p->passband_end = 1 - .01 / TO_3dB(rej); return spec; } char const * soxr_engine(soxr_t p) { return resampler_id(); } size_t * soxr_num_clips(soxr_t p) { return &p->clips; } soxr_error_t soxr_error(soxr_t p) { return p->error; } soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads) { soxr_runtime_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); p->log2_min_dft_size = 10; p->log2_large_dft_size = 17; p->coef_size_kbytes = 400; p->num_threads = num_threads; return spec; } soxr_io_spec_t soxr_io_spec( soxr_datatype_t itype, soxr_datatype_t otype) { soxr_io_spec_t spec, * p = &spec; memset(p, 0, sizeof(*p)); if ((itype | otype) >= SOXR_SPLIT * 2) p->e = "invalid io datatype(s)"; else { p->itype = itype; p->otype = otype; p->scale = 1; } return spec; } #if HAVE_SIMD static bool cpu_has_simd(void) { #if defined __x86_64__ || defined _M_X64 return true; #elif defined __GNUC__ && defined i386 uint32_t eax, ebx, ecx, edx; __asm__ __volatile__ ( "pushl %%ebx \n\t" "cpuid \n\t" "movl %%ebx, %1\n\t" "popl %%ebx \n\t" : "=a"(eax), "=r"(ebx), "=c"(ecx), "=d"(edx) : "a"(1) : "cc" ); return !!(edx & 0x06000000); #elif defined _MSC_VER && defined _M_IX86 uint32_t d; __asm { xor eax, eax inc eax push ebx cpuid pop ebx mov d, edx } return !!(d & 0x06000000); #endif return false; } #endif extern control_block_t _soxr_rate32s_cb, _soxr_rate32_cb, _soxr_rate64_cb, _soxr_vr32_cb; soxr_t soxr_create( double input_rate, double output_rate, unsigned num_channels, soxr_error_t * error0, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { double io_ratio = output_rate? input_rate? 
input_rate / output_rate : -1 : input_rate? -1 : 0; static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768}; soxr_t p = 0; soxr_error_t error = 0; if (q_spec && q_spec->e) error = q_spec->e; else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2) error = "invalid io datatype(s)"; if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed"; if (p) { p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0); if (q_spec) { /* Backwards compatibility with original API: */ if (p->q_spec.passband_end > 2) p->q_spec.passband_end /= 100; if (p->q_spec.stopband_begin > 2) p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100; } p->io_ratio = io_ratio; p->num_channels = num_channels; if (io_spec) p->io_spec = *io_spec; else p->io_spec.scale = 1; p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1); p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] / datatype_full_scale[p->io_spec.itype & 3]; p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p; #if HAVE_SINGLE_PRECISION if (!HAVE_DOUBLE_PRECISION || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION)) || (p->q_spec.flags & SOXR_VR)) { p->deinterleave = (deinterleave_t)_soxr_deinterleave_f; p->interleave = (interleave_t)_soxr_interleave_f; memcpy(&p->control_block, (p->q_spec.flags & SOXR_VR)? &_soxr_vr32_cb : #if HAVE_SIMD cpu_has_simd()? 
&_soxr_rate32s_cb : #endif &_soxr_rate32_cb, sizeof(p->control_block)); } #if HAVE_DOUBLE_PRECISION else #endif #endif #if HAVE_DOUBLE_PRECISION { p->deinterleave = (deinterleave_t)_soxr_deinterleave; p->interleave = (interleave_t)_soxr_interleave; memcpy(&p->control_block, &_soxr_rate64_cb, sizeof(p->control_block)); } #endif if (p->num_channels && io_ratio) error = soxr_set_io_ratio(p, io_ratio, 0); } if (error) soxr_delete(p), p = 0; if (error0) *error0 = error; return p; } soxr_error_t soxr_set_input_fn(soxr_t p, soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen) { p->input_fn_state = input_fn_state; p->input_fn = input_fn; p->max_ilen = max_ilen? max_ilen : (size_t)-1; return 0; } static void soxr_delete0(soxr_t p) { unsigned i; if (p->resamplers) for (i = 0; i < p->num_channels; ++i) { if (p->resamplers[i]) resampler_close(p->resamplers[i]); free(p->resamplers[i]); } free(p->resamplers); free(p->channel_ptrs); free(p->shared); memset(p, 0, sizeof(*p)); } double soxr_delay(soxr_t p) { return (p && !p->error && p->resamplers)? 
resampler_delay(p->resamplers[0]) : 0; } static soxr_error_t fatal_error(soxr_t p, soxr_error_t error) { soxr_delete0(p); return p->error = error; } static soxr_error_t initialise(soxr_t p) { unsigned i; size_t shared_size, channel_size; resampler_sizes(&shared_size, &channel_size); p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels); p->shared = calloc(shared_size, 1); p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels); if (!p->shared || !p->channel_ptrs || !p->resamplers) return fatal_error(p, "malloc failed"); for (i = 0; i < p->num_channels; ++i) { soxr_error_t error; if (!(p->resamplers[i] = calloc(channel_size, 1))) return fatal_error(p, "malloc failed"); error = resampler_create( p->resamplers[i], p->shared, p->io_ratio, &p->q_spec, &p->runtime_spec, p->io_spec.scale); if (error) return fatal_error(p, error); } return 0; } soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels) { if (!p) return "invalid soxr_t pointer"; if (num_channels == p->num_channels) return p->error; if (!num_channels) return "invalid # of channels"; if (p->resamplers) return "# of channels can't be changed"; p->num_channels = num_channels; return soxr_set_io_ratio(p, p->io_ratio, 0); } soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len) { unsigned i; soxr_error_t error; if (!p) return "invalid soxr_t pointer"; if ((error = p->error)) return error; if (!p->num_channels) return "must set # channels before O/I ratio"; if (io_ratio <= 0) return "I/O ratio out-of-range"; if (!p->channel_ptrs) { p->io_ratio = io_ratio; return initialise(p); } if (p->control_block[8]) { for (i = 0; !error && i < p->num_channels; ++i) resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len); return error; } return fabs(p->io_ratio - io_ratio) < 1e-15? 0 : "Varying O/I ratio is not supported with this quality level"; } void soxr_delete(soxr_t p) { if (p) soxr_delete0(p), free(p); } soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. 
*/ { if (p) { struct soxr tmp = *p; soxr_delete0(p); memset(p, 0, sizeof(*p)); p->input_fn = tmp.input_fn; p->runtime_spec = tmp.runtime_spec; p->q_spec = tmp.q_spec; p->io_spec = tmp.io_spec; p->num_channels = tmp.num_channels; p->input_fn_state = tmp.input_fn_state; memcpy(p->control_block, tmp.control_block, sizeof(p->control_block)); p->deinterleave = tmp.deinterleave; p->interleave = tmp.interleave; return 0; } return "invalid soxr_t pointer"; } static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len) { sample_t * dest = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1); } static size_t soxr_input(soxr_t p, void const * in, size_t len) { bool separated = !!(p->io_spec.itype & SOXR_SPLIT); unsigned i; if (!p || p->error) return 0; if (!in && len) {p->error = "null input buffer pointer"; return 0;} if (!len) { p->flushing = true; return 0; } if (separated) for (i = 0; i < p->num_channels; ++i) soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len); else { for (i = 0; i < p->num_channels; ++i) p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len); (*p->deinterleave)( (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels); } return len; } static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated) { sample_t const * src; if (p->flushing) resampler_flush(p->resamplers[i]); resampler_process(p->resamplers[i], len); src = resampler_output(p->resamplers[i], NULL, &len); if (separated) p->clips += (p->interleave)(p->io_spec.otype, &dest, &src, len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 
0 : &p->seed); else p->channel_ptrs[i] = (void /* const */ *)src; return len; } static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len) { unsigned u; size_t done = 0; bool separated = !!(p->io_spec.otype & SOXR_SPLIT); #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done1; done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated); if (!i) done = done1; } else #endif for (u = 0; u < p->num_channels; ++u) done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated); if (!separated) p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs, done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed); return done; } size_t soxr_output(soxr_t p, void * out, size_t len0) { size_t odone, odone0 = 0, olen = len0, osize, idone; size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio)); void const * in = out; /* Set to !=0, so that caller may leave unset. 
*/ bool was_flushing; if (!p || p->error) return 0; if (!out && len0) {p->error = "null output buffer pointer"; return 0;} do { odone = soxr_output_no_callback(p, out, olen); odone0 += odone; if (odone0 == len0 || !p->input_fn || p->flushing) break; osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels; out = (char *)out + osize * odone; olen -= odone; idone = p->input_fn(p->input_fn_state, &in, ilen); was_flushing = p->flushing; if (!in) p->error = "input function reported failure"; else soxr_input(p, in, idone); } while (odone || idone || (!was_flushing && p->flushing)); return odone0; } static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen) { size_t result; #if 0 if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING) result = rate_i_for_o(p->resamplers[0], olen); else #endif result = (size_t)ceil((double)olen * p->io_ratio); return min(result, ilen); } #if 0 static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen) { size_t result = (size_t)ceil((double)ilen / p->io_ratio); return min(result, olen); } #endif soxr_error_t soxr_process(soxr_t p, void const * in , size_t ilen0, size_t * idone0, void * out, size_t olen , size_t * odone0) { size_t ilen, idone, odone = 0; unsigned u; bool flush_requested = false; if (!p) return "null pointer"; if (!in) flush_requested = true, ilen = ilen0 = 0; else { if ((ptrdiff_t)ilen0 < 0) flush_requested = true, ilen0 = ~ilen0; if (idone0 && (1 || flush_requested)) ilen = soxr_i_for_o(p, olen, ilen0); else ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/; } p->flushing |= ilen == ilen0 && flush_requested; if (!out && !in) idone = ilen; else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */ #if defined _OPENMP int i; if (!p->runtime_spec.num_threads && p->num_channels > 1) #pragma omp parallel for for (i = 0; i < (int)p->num_channels; ++i) { size_t done; if (in) soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen); done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], 
olen, true); if (!i) odone = done; } else #endif for (u = 0; u < p->num_channels; ++u) { if (in) soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen); odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true); } idone = ilen; } else { idone = ilen? soxr_input (p, in , ilen) : 0; odone = soxr_output(p, out, olen); } if (idone0) *idone0 = idone; if (odone0) *odone0 = odone; return p->error; } soxr_error_t soxr_oneshot( double irate, double orate, unsigned num_channels, void const * in , size_t ilen, size_t * idone, void * out, size_t olen, size_t * odone, soxr_io_spec_t const * io_spec, soxr_quality_spec_t const * q_spec, soxr_runtime_spec_t const * runtime_spec) { soxr_t resampler = NULL; soxr_error_t error = q_spec? q_spec->e : 0; if (!error) { soxr_quality_spec_t q_spec1; if (!q_spec) q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1; resampler = soxr_create(irate, orate, num_channels, &error, io_spec, q_spec, runtime_spec); } if (!error) { error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone); soxr_delete(resampler); } return error; } soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error) { if (!p) return "null pointer"; if (!p->error && p->error != error) return p->error; p->error = error; return 0; }
thread_info.h
// ----------------------------------------------------------------------------- // // Copyright (C) The BioDynaMo Project. // All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_UTIL_THREAD_INFO_H_ #define CORE_UTIL_THREAD_INFO_H_ #include <omp.h> #include <sched.h> #include <atomic> #include <vector> #include "core/util/log.h" #include "core/util/numa.h" namespace bdm { /// \brief This class stores information about each thread. (e.g. to which NUMA /// node it belongs to.) /// NB: Threads **must** be bound to CPUs using `OMP_PROC_BIND=true`. class ThreadInfo { public: static ThreadInfo* GetInstance() { static ThreadInfo kInstance; return &kInstance; } // FIXME add test int GetMyThreadId() const { return omp_get_thread_num(); } // FIXME add test int GetMyNumaNode() const { return GetNumaNode(GetMyThreadId()); } /// Return the numa thread id of an openmp thread. int GetMyNumaThreadId() const { return GetNumaThreadId(GetMyThreadId()); } /// Returns the number of NUMA nodes on this machine int GetNumaNodes() const { return numa_nodes_; } /// Returns the numa node the given openmp thread is bound to. int GetNumaNode(int omp_thread_id) const { return thread_numa_mapping_[omp_thread_id]; } /// Returns the number of threads in a given NUMA node. int GetThreadsInNumaNode(int numa_node) const { return threads_in_numa_[numa_node]; } /// Return the numa thread id of an openmp thread. int GetNumaThreadId(int omp_thread_id) const { return numa_thread_id_[omp_thread_id]; } /// Return the maximum number of threads. 
int GetMaxThreads() const { return max_threads_; } /// Returns a unique thread id even for parallel regions that /// don't use OpenMP. uint64_t GetUniversalThreadId() const { thread_local uint64_t kTid = thread_counter_++; return kTid; } uint64_t GetMaxUniversalThreadId() const { return thread_counter_; } /// Renews the metadata.\n /// Whenever a thread is scheduled on a different cpu, e.g. using /// `numa_run_on_node`, `Renew()` must be called to update the thread /// metadata. void Renew() { max_threads_ = omp_get_max_threads(); numa_nodes_ = numa_num_configured_nodes(); thread_numa_mapping_.clear(); numa_thread_id_.clear(); threads_in_numa_.clear(); thread_numa_mapping_.resize(max_threads_, 0); numa_thread_id_.resize(max_threads_, 0); threads_in_numa_.resize(numa_nodes_, 0); // (openmp thread id -> numa node) #pragma omp parallel { int tid = omp_get_thread_num(); thread_numa_mapping_[tid] = numa_node_of_cpu(sched_getcpu()); } // (numa -> number of associated threads), and // (omp_thread_id -> thread id in numa) for (uint16_t n = 0; n < numa_nodes_; n++) { uint64_t cnt = 0; for (uint64_t t = 0; t < max_threads_; t++) { int numa = thread_numa_mapping_[t]; if (n == numa) { numa_thread_id_[t] = cnt; cnt++; } } threads_in_numa_[n] = cnt; } } friend std::ostream& operator<<(std::ostream& str, const ThreadInfo& ti) { str << "max_threads\t\t: " << ti.max_threads_ << "\nnum_numa nodes\t\t: " << ti.numa_nodes_; str << "\nthread to numa mapping\t: "; for (auto& el : ti.thread_numa_mapping_) { str << el << " "; } str << "\nthread id in numa node\t: "; for (auto& el : ti.numa_thread_id_) { str << el << " "; } str << "\nnum threads per numa\t: "; for (auto& el : ti.threads_in_numa_) { str << el << " "; } str << "\n"; return str; } private: static std::atomic<uint64_t> thread_counter_; /// Maximum number of threads for this simulation. uint64_t max_threads_; /// Number of NUMA nodes on this machine. 
uint16_t numa_nodes_; /// Contains the mapping thread id -> numa node \n /// vector position = omp_thread_id \n /// vector value = numa node std::vector<int> thread_numa_mapping_; /// Contains the mapping omp_thread_id -> numa thread id \n /// each thread in a numa domain has a unique id in the range 0 to number \n /// of threads in this numa domain std::vector<int> numa_thread_id_; /// Contains the mapping numa node -> total number of threads in this numa /// node \n /// vector position: numa node \n /// vector value number of threads std::vector<int> threads_in_numa_; ThreadInfo() { auto proc_bind = omp_get_proc_bind(); if (proc_bind != 1 && proc_bind != 4) { // 4 corresponds to OMP_PROC_BIND=spread // Due to some reason some OpenMP implementations set proc bind to spread // even though OMP_PROC_BIND is set to true. // A performance analysis showed almost identical results between true, // and spread. Log::Warning( "ThreadInfo::ThreadInfo", "The environment variable OMP_PROC_BIND must be set to " "true prior to running BioDynaMo ('export OMP_PROC_BIND=true')"); } Renew(); } }; } // namespace bdm #endif // CORE_UTIL_THREAD_INFO_H_
test11.c
/* Compiler/runtime regression test: an OpenMP `parallel` construct nested
 * inside a non-terminating loop, with a (dead) backward goto after the
 * loop.  The program intentionally never returns. */
int main() {
  int i; /* intentionally declared but unused */
l:
  for (;;) {
#pragma omp parallel
    {
      /* empty parallel region */
    }
  }
  /* Unreachable: the loop above never exits. */
  if (1) {
    goto l;
  }
}
bml_normalize_ellpack_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_normalize.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellpack.h"
#include "bml_allocate_ellpack.h"
#include "bml_normalize_ellpack.h"
#include "bml_scale_ellpack.h"
#include "bml_types_ellpack.h"

#include <complex.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/* Normalize ellpack matrix given Gershgorin bounds.
 *
 *  \ingroup normalize_group
 *
 *  \param A The matrix
 *  \param mineval Calculated min value
 *  \param maxeval Calculated max value
 */
void TYPED_FUNC(
    bml_normalize_ellpack) (
    bml_matrix_ellpack_t * A,
    double mineval,
    double maxeval)
{
    double maxminusmin = maxeval - mineval;
    double gershfact = maxeval / maxminusmin;
    /* Scale by -1/(max-min), then shift by max/(max-min) on the diagonal. */
    REAL_T scalar = (REAL_T) - 1.0 / maxminusmin;
    double threshold = 0.0;

    bml_scale_inplace_ellpack(&scalar, A);
    bml_add_identity_ellpack(A, gershfact, threshold);
}

/** Calculate Gershgorin bounds for an ellpack matrix.
 *
 *  \ingroup normalize_group
 *
 *  \param A The matrix
 *  \param nrows Number of rows to use
 *  returns mineval Calculated min value
 *  returns maxeval Calculated max value
 */
void *TYPED_FUNC(
    bml_gershgorin_ellpack) (
    bml_matrix_ellpack_t * A)
{
    REAL_T radius, absham, dvalue;

    double emin = DBL_MAX;
    /* FIX: was DBL_MIN, which is the smallest *positive* normal double and
     * therefore a wrong neutral element for a max-reduction: an all-negative
     * spectrum would leave emax at DBL_MIN.  Use -DBL_MAX instead. */
    double emax = -DBL_MAX;

    double *eval = bml_allocate_memory(sizeof(double) * 2);

    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int myRank = bml_getMyRank();

    /* Per-row diagonal value and off-diagonal radius. */
    REAL_T rad[N];
    REAL_T dval[N];

    REAL_T *A_value = (REAL_T *) A->value;

#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        radius = 0.0;
        dvalue = 0.0;

        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }

        dval[i] = dvalue;
        rad[i] = radius;
    }

    /* Sequentially reduce the per-row bounds over the local rows. */
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }

#ifdef DO_MPI
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_minRealReduce(&emin);
        bml_maxRealReduce(&emax);
    }
#endif

    eval[0] = emin;
    eval[1] = emax;

    return eval;
}

/** Calculate Gershgorin bounds for a partial ellpack matrix.
 *
 *  \ingroup normalize_group
 *
 *  \param A The matrix
 *  \param nrows Number of rows to use
 *  returns mineval Calculated min value
 *  returns maxeval Calculated max value
 */
void *TYPED_FUNC(
    bml_gershgorin_partial_ellpack) (
    bml_matrix_ellpack_t * A,
    int nrows)
{
    REAL_T radius, absham, dvalue;

    double emin = DBL_MAX;
    /* FIX: was DBL_MIN; see bml_gershgorin_ellpack above. */
    double emax = -DBL_MAX;

    double *eval = bml_allocate_memory(sizeof(double) * 2);

    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;

    REAL_T rad[N];
    REAL_T dval[N];

    REAL_T *A_value = (REAL_T *) A->value;

#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    for (int i = 0; i < nrows; i++)
    {
        radius = 0.0;
        dvalue = 0.0;

        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }

        dval[i] = dvalue;
        rad[i] = radius;
    }

    for (int i = 0; i < nrows; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }

    eval[0] = emin;
    eval[1] = emax;

    return eval;
}
blackscholes.c
// Copyright (c) 2007 Intel Corp.

// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice
// Hall, John C. Hull,

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif

// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
#define MAX_THREADS 128
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif

// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define MAX_THREADS 128
#endif

//Precision to use for calculations
#define fptype float

#define NUM_RUNS 100

typedef struct OptionData_ {
        fptype s;          // spot price
        fptype strike;     // strike price
        fptype r;          // risk-free interest rate
        fptype divq;       // dividend rate
        fptype v;          // volatility
        fptype t;          // time to maturity or option expiration in years
                           //     (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
        char OptionType;   // Option type.  "P"=PUT, "C"=CALL
        fptype divs;       // dividend vals (not used in this test)
        fptype DGrefval;   // DerivaGem Reference Value
} OptionData;

OptionData *data;
fptype *prices;
int numOptions;

int    * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numError = 0;
int nThreads;

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286

fptype CNDF ( fptype InputX )
{
    int sign;

    fptype OutputX;
    fptype xInput;
    fptype xNPrimeofX;
    fptype expValues;
    fptype xK2;
    fptype xK2_2, xK2_3;
    fptype xK2_4, xK2_5;
    fptype xLocal, xLocal_1;
    fptype xLocal_2, xLocal_3;

    // Check for negative value of InputX
    if (InputX < 0.0) {
        InputX = -InputX;
        sign = 1;
    } else
        sign = 0;

    xInput = InputX;

    // Compute NPrimeX term common to both four & six decimal accuracy calcs
    expValues = exp(-0.5f * InputX * InputX);
    xNPrimeofX = expValues;
    xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;

    // Abramowitz & Stegun style polynomial approximation in 1/(1+0.2316419x)
    xK2 = 0.2316419 * xInput;
    xK2 = 1.0 + xK2;
    xK2 = 1.0 / xK2;
    xK2_2 = xK2 * xK2;
    xK2_3 = xK2_2 * xK2;
    xK2_4 = xK2_3 * xK2;
    xK2_5 = xK2_4 * xK2;

    xLocal_1 = xK2 * 0.319381530;
    xLocal_2 = xK2_2 * (-0.356563782);
    xLocal_3 = xK2_3 * 1.781477937;
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_4 * (-1.821255978);
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_5 * 1.330274429;
    xLocal_2 = xLocal_2 + xLocal_3;

    xLocal_1 = xLocal_2 + xLocal_1;
    xLocal   = xLocal_1 * xNPrimeofX;
    xLocal   = 1.0 - xLocal;

    OutputX  = xLocal;

    // Mirror for negative inputs: N(-x) = 1 - N(x)
    if (sign) {
        OutputX = 1.0 - OutputX;
    }

    return OutputX;
}

// For debugging
void print_xmm(fptype in, char* s) {
    printf("%s: %f\n", s, in);
}

//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
                            fptype strike, fptype rate, fptype volatility,
                            fptype time, int otype, float timet )
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xStockPrice;
    fptype xStrikePrice;
    fptype xRiskFreeRate;
    fptype xVolatility;
    fptype xTime;
    fptype xSqrtTime;

    fptype logValues;
    fptype xLogTerm;
    fptype xD1;
    fptype xD2;
    fptype xPowerTerm;
    fptype xDen;
    fptype d1;
    fptype d2;
    fptype FutureValueX;
    fptype NofXd1;
    fptype NofXd2;
    fptype NegNofXd1;
    fptype NegNofXd2;

    xStockPrice = sptprice;
    xStrikePrice = strike;
    xRiskFreeRate = rate;
    xVolatility = volatility;

    xTime = time;
    xSqrtTime = sqrt(xTime);

    logValues = log( sptprice / strike );

    xLogTerm = logValues;

    // d1 = (ln(S/K) + (r + v^2/2) t) / (v sqrt(t));  d2 = d1 - v sqrt(t)
    xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;

    xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;

    xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    xD2 = xD1 - xDen;

    d1 = xD1;
    d2 = xD2;

    NofXd1 = CNDF( d1 );
    NofXd2 = CNDF( d2 );

    // Present value of the strike.
    FutureValueX = strike * ( exp( -(rate)*(time) ) );
    if (otype == 0) {           // call
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {                    // put
        NegNofXd1 = (1.0 - NofXd1);
        NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }

    return OptionPrice;
}

//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif
    int i, j;
    fptype price;
#ifdef ERR_CHK
    // FIX: only needed (and only used) when error checking is compiled in.
    fptype priceDelta;
#endif
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

    for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price)
        for (i=0; i<numOptions; i++) {
#else  //ENABLE_OPENMP
        for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
            /* Calling main function to calculate option value based on
             * Black & Sholes's equation.
             */
            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-4 ){
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                numError ++;
            }
#endif
        }
    }

    return 0;
}

int main (int argc, char **argv)
{
    FILE *file;
    int i;
    int loopnum;
    fptype * buffer;
    int * buffer2;
    int rv;

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
    printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
    fflush(NULL);
#else
    printf("PARSEC Benchmark Suite\n");
    fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_begin(__parsec_blackscholes);
#endif

    if (argc != 4)
    {
        printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
        exit(1);
    }
    nThreads = atoi(argv[1]);
    char *inputFile = argv[2];
    char *outputFile = argv[3];

    //Read input data from file
    file = fopen(inputFile, "r");
    if(file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", inputFile);
        exit(1);
    }
    rv = fscanf(file, "%i", &numOptions);
    if(rv != 1) {
        printf("ERROR: Unable to read from file `%s'.\n", inputFile);
        fclose(file);
        exit(1);
    }
    if(nThreads > numOptions) {
        printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
        nThreads = numOptions;
    }

#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP)
    if(nThreads != 1) {
        printf("Error: <nthreads> must be 1 (serial version)\n");
        exit(1);
    }
#endif

    // alloc spaces for the option data
    data = (OptionData*)malloc(numOptions*sizeof(OptionData));
    prices = (fptype*)malloc(numOptions*sizeof(fptype));
    for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
    {
        rv = fscanf(file, "%f %f %f %f %f %f %c %f %f",
                    &data[loopnum].s, &data[loopnum].strike,
                    &data[loopnum].r, &data[loopnum].divq,
                    &data[loopnum].v, &data[loopnum].t,
                    &data[loopnum].OptionType, &data[loopnum].divs,
                    &data[loopnum].DGrefval);
        if(rv != 9) {
            printf("ERROR: Unable to read from file `%s'.\n", inputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if(rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", inputFile);
        exit(1);
    }

#ifdef ENABLE_THREADS
    MAIN_INITENV(,8000000,nThreads);
#endif
    printf("Num of Options: %d\n", numOptions);
    printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

    // Allocate one padded block and hand out cache-line-aligned sub-arrays.
    buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
    sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
    strike = sptprice + numOptions;
    rate = strike + numOptions;
    volatility = rate + numOptions;
    otime = volatility + numOptions;

    // FIX: was `sizeof(fptype)` — the block holds ints, not fptypes.
    buffer2 = (int *) malloc(numOptions * sizeof(int) + PAD);
    otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));

    for (i=0; i<numOptions; i++) {
        otype[i]      = (data[i].OptionType == 'P') ? 1 : 0;
        sptprice[i]   = data[i].s;
        strike[i]     = data[i].strike;
        rate[i]       = data[i].r;
        volatility[i] = data[i].v;
        otime[i]      = data[i].t;
    }

    // FIX: the product involves sizeof and is a size_t; %d was a
    // format/argument mismatch (undefined behavior) — use %zu.
    printf("Size of data: %zu\n", numOptions * (sizeof(OptionData) + sizeof(int)));

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
    int tids[nThreads];
    for(i=0; i<nThreads; i++) {
        tids[i]=i;
        CREATE_WITH_ARG(bs_thread, &tids[i]);
    }
    WAIT_FOR_END(nThreads);
#else//ENABLE_THREADS
#ifdef ENABLE_OPENMP
    {
        int tid=0;
        omp_set_num_threads(nThreads);
        bs_thread(&tid);
    }
#else //ENABLE_OPENMP
#ifdef WIN32
    if (nThreads > 1)
    {
        HANDLE threads[MAX_THREADS];
        int nums[MAX_THREADS];
        for(i=0; i<nThreads; i++) {
            nums[i] = i;
            threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
        }
        WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
    } else
#endif
    {
        int tid=0;
        bs_thread(&tid);
    }
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_end();
#endif

    //Write prices to output file
    file = fopen(outputFile, "w");
    if(file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", outputFile);
        exit(1);
    }
    rv = fprintf(file, "%i\n", numOptions);
    if(rv < 0) {
        printf("ERROR: Unable to write to file `%s'.\n", outputFile);
        fclose(file);
        exit(1);
    }
    for(i=0; i<numOptions; i++) {
        rv = fprintf(file, "%.18f\n", prices[i]);
        if(rv < 0) {
            printf("ERROR: Unable to write to file `%s'.\n", outputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if(rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", outputFile);
        exit(1);
    }

#ifdef ERR_CHK
    printf("Num Errors: %d\n", numError);
#endif
    free(data);
    free(prices);
    // FIX: these two blocks were leaked; sptprice/strike/rate/volatility/
    // otime point into buffer, and otype points into buffer2, so the base
    // pointers are the ones to free.
    free(buffer);
    free(buffer2);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_end();
#endif

    return 0;
}
GB_binop__bxnor_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_01__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_03__bxnor_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxnor_int8)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bxnor_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxnor_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxnor_int8)
// C=scalar+B                       GB (_bind1st__bxnor_int8)
// C=scalar+B'                      GB (_bind1st_tran__bxnor_int8)
// C=A+scalar                       GB (_bind2nd__bxnor_int8)
// C=A'+scalar                      GB (_bind2nd_tran__bxnor_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = ~((aij) ^ (bij))

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = ~((x) ^ (y)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXNOR || GxB_NO_INT8 || GxB_NO_BXNOR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxnor_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxnor_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxnor_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bxnor_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxnor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bxnor_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxnor_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxnor_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxnor_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB (_bind1st_tran__bxnor_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
opencl_rar_fmt_plug.c
/* RAR 3.x cracker patch for JtR. Hacked together during * April of 2011 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC. * magnum added -p mode support, using code based on libclamav * and OMP, AES-NI and OpenCL support. * * This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum and it is hereby released to the general public * under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This code is based on the work of Alexander L. Roshal (C) * * The unRAR sources may be used in any software to handle RAR * archives without limitations free of charge, but cannot be used * to re-create the RAR compression algorithm, which is proprietary. * Distribution of modified unRAR sources in separate form or as a * part of other software is permitted, provided that it is clearly * stated in the documentation and source comments that the code may * not be used to develop a RAR (WinRAR) compatible archiver. * * Huge thanks to Marc Bevand <m.bevand (at) gmail.com> for releasing unrarhp * (http://www.zorinaq.com/unrarhp/) and documenting the RAR encryption scheme. * This patch is made possible by unrarhp's documentation. * * http://anrieff.net/ucbench/technical_qna.html is another useful reference * for RAR encryption scheme. * * Thanks also to Pavel Semjanov for crucial help with Huffman table checks. * * For type = 0 for files encrypted with "rar -hp ..." option * archive_name:$RAR3$*type*hex(salt)*hex(partial-file-contents):type::::archive_name * * For type = 1 for files encrypted with "rar -p ..." 
option * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*archive_name*offset-for-ciphertext*method:type::file_name * * or (inlined binary) * * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*1*hex(full encrypted file)*method:type::file_name * */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_ocl_rar; #elif FMT_REGISTERS_H john_register_one(&fmt_ocl_rar); #else #define STEP 0 #define SEED 256 #include <string.h> #include <assert.h> #include <errno.h> #include <openssl/engine.h> #include <openssl/evp.h> #include <openssl/ssl.h> #include "arch.h" #include "sha.h" #if AC_BUILT #include "autoconfig.h" #endif #if _MSC_VER || __MINGW32__ || __MINGW64__ || __CYGWIN__ || HAVE_WINDOWS_H #include "win32_memmap.h" #ifndef __CYGWIN__ #include "mmap-windows.c" #elif defined HAVE_MMAP #include <sys/mman.h> #endif #elif defined(HAVE_MMAP) #include <sys/mman.h> #endif #ifdef _OPENMP #include <omp.h> #include <pthread.h> #define OMP_SCALE 32 static pthread_mutex_t *lockarray; #endif #include "crc32.h" #include "misc.h" #include "common.h" #include "formats.h" #include "dyna_salt.h" #include "memory.h" #include "params.h" #include "options.h" #include "unicode.h" #include "johnswap.h" #include "unrar.h" #include "common-opencl.h" #include "config.h" #include "jumbo.h" #define FORMAT_LABEL "rar-opencl" #define FORMAT_NAME "RAR3" #define ALGORITHM_NAME "SHA1 OpenCL AES" #ifdef DEBUG #define BENCHMARK_COMMENT " (length 1-16)" #else #define BENCHMARK_COMMENT " (length 4)" #endif #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 22 /* Max. 
currently supported is 22 */ #define UNICODE_LENGTH (2 * PLAINTEXT_LENGTH) #define BINARY_SIZE 0 #define BINARY_ALIGN MEM_ALIGN_NONE #define SALT_SIZE sizeof(rarfile*) #define SALT_ALIGN sizeof(rarfile*) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define OCL_CONFIG "rar" static const char * warn[] = { "key xfer: " , ", len xfer: " , ", init: " , ", loop: " , ", final: ", ", key xfer: ", ", iv xfer: " }; static int split_events[] = { 3, -1, -1 }; static int crypt_all(int *pcount, struct db_salt *_salt); static int crypt_all_benchmark(int *pcount, struct db_salt *_salt); //This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" #define ITERATIONS 0x40000 #define HASH_LOOPS 0x04000 // Fixed, do not change #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) static int omp_t = 1; static unsigned char *saved_salt; static unsigned char *saved_key; static int new_keys; static int (*cracked); static unpack_data_t (*unpack_data); static unsigned int *saved_len; static unsigned char *aes_key; static unsigned char *aes_iv; typedef struct { dyna_salt dsalt; /* must be first. allows dyna_salt to work */ /* place all items we are NOT going to use for salt comparison, first */ unsigned char *blob; /* data from this point on, is part of the salt for compare reasons */ unsigned char salt[8]; int type; /* 0 = -hp, 1 = -p */ /* for rar -p mode only: */ union { unsigned int w; unsigned char c[4]; } crc; unsigned long long pack_size; unsigned long long unp_size; int method; unsigned char blob_hash[20]; // holds an sha1, but could be 'any' hash. // raw_data should be word aligned, and 'ok' unsigned char raw_data[1]; } rarfile; static rarfile *cur_file; /* Determines when to use CPU instead (eg. 
Single mode, few keys in a call) */ #define CPU_GPU_RATIO 32 static cl_mem cl_saved_key, cl_saved_len, cl_salt, cl_OutputBuf, cl_round, cl_aes_key, cl_aes_iv; static cl_mem pinned_saved_key, pinned_saved_len, pinned_salt, pinned_aes_key, pinned_aes_iv; static cl_kernel RarInit, RarFinal; /* cRARk use 4-char passwords for CPU benchmark */ static struct fmt_tests cpu_tests[] = { {"$RAR3$*0*b109105f5fe0b899*d4f96690b1a8fe1f120b0290a85a2121", "test"}, {"$RAR3$*0*42ff7e92f24fb2f8*9d8516c8c847f1b941a0feef064aaf0d", "1234"}, {"$RAR3$*0*56ce6de6ddee17fb*4c957e533e00b0e18dfad6accc490ad9", "john"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, {"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ {"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, 
{"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, {"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"}, {"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"}, 
{"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"}, {"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"}, {"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"}, {"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"}, {"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"}, {"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"}, {"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"}, {"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"}, {"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"}, {"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"}, {"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"}, {"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"}, {"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"}, {"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"}, {"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"}, {"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"}, {"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"}, {"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"}, {"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"}, {"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"}, {"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"}, #endif {NULL} }; /* cRARk use 5-char passwords for GPU benchmark */ static struct fmt_tests gpu_tests[] = { {"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"}, {"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"}, 
{"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"}, {"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"}, {"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, {"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG {"$RAR3$*0*af24c0c95e9cafc7*e7f207f30dec96a5ad6f917a69d0209e", "magnum"}, {"$RAR3$*0*2653b9204daa2a8e*39b11a475f486206e2ec6070698d9bbc", "123456"}, {"$RAR3$*0*63f1649f16c2b687*8a89f6453297bcdb66bd756fa10ddd98", "abc123"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*575b083d78672e85*965f1453*48*47*1*cd3d8756438f43ab70e668792e28053f0ad7449af1c66863e3e55332bfa304b2c082b9f23b36cd4a8ebc0b743618c5b2*30", "magnum"}, {"$RAR3$*1*6f5954680c87535a*965f1453*64*47*1*c9bb398b9a5d54f035fd22be54bc6dc75822f55833f30eb4fb8cc0b8218e41e6d01824e3467475b90b994a5ddb7fe19366d293c9ee305316c2a60c3a7eb3ce5a*33", "magnum"}, /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ 
{"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, {"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, 
{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"}, {"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"}, {"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"}, {"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"}, {"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"}, {"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"}, {"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"}, {"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"}, {"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"}, {"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"}, {"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"}, {"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"}, {"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"}, 
{"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"}, {"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"}, {"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"}, {"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"}, {"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"}, #endif {NULL} }; #if defined (_OPENMP) static void lock_callback(int mode, int type, const char *file, int line) { (void)file; (void)line; if (mode & CRYPTO_LOCK) pthread_mutex_lock(&(lockarray[type])); else pthread_mutex_unlock(&(lockarray[type])); } static unsigned long thread_id(void) { return omp_get_thread_num(); } static void init_locks(void) { int i; lockarray = (pthread_mutex_t*) OPENSSL_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t)); for (i = 0; i < CRYPTO_num_locks(); i++) pthread_mutex_init(&(lockarray[i]), NULL); CRYPTO_set_id_callback(thread_id); CRYPTO_set_locking_callback(lock_callback); } #endif /* _OPENMP */ /* Use AES-NI if available. This is not supported with low-level calls, we have to use EVP) */ static void init_aesni(void) { ENGINE *e; const char *engine_id = "aesni"; ENGINE_load_builtin_engines(); e = ENGINE_by_id(engine_id); if (!e) { //fprintf(stderr, "AES-NI engine not available\n"); return; } if (!ENGINE_init(e)) { fprintf(stderr, "AES-NI engine could not init\n"); ENGINE_free(e); return; } if (!ENGINE_set_default(e, ENGINE_METHOD_ALL & ~ENGINE_METHOD_RAND)) { /* This should only happen when 'e' can't initialise, but the * previous statement suggests it did. 
*/ fprintf(stderr, "AES-NI engine initialized but then failed\n"); abort(); } ENGINE_finish(e); ENGINE_free(e); } #ifndef __APPLE__ /* Apple segfaults on this :) */ static void openssl_cleanup(void) { ENGINE_cleanup(); ERR_free_strings(); CRYPTO_cleanup_all_ex_data(); EVP_cleanup(); } #endif static void create_clobj(size_t gws, struct fmt_main *self) { int i; int bench_len = strlen(self->params.tests[0].plaintext) * 2; pinned_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, UNICODE_LENGTH * gws, NULL , &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, UNICODE_LENGTH * gws, NULL , &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_key = (unsigned char*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, UNICODE_LENGTH * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_key"); memset(saved_key, 0, UNICODE_LENGTH * gws); pinned_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_saved_len = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_len = (unsigned int*)clEnqueueMapBuffer(queue[gpu_id], pinned_saved_len, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_int) * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_len"); for (i = 0; i < gws; i++) saved_len[i] = bench_len; pinned_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 8, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 8, NULL, &ret_code); 
HANDLE_CLERROR(ret_code, "Error allocating device memory"); saved_salt = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_salt, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 8, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory saved_salt"); memset(saved_salt, 0, 8); cl_OutputBuf = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_int) * 5 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); cl_round = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_int) * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); // aes_key is uchar[16] but kernel treats it as uint[4] pinned_aes_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, sizeof(cl_uint) * 4 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_aes_key = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(cl_uint) * 4 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); aes_key = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_aes_key, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, sizeof(cl_uint) * 4 * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory aes_key"); memset(aes_key, 0, 16 * gws); pinned_aes_iv = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, 16 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating page-locked memory"); cl_aes_iv = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, 16 * gws, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error allocating device memory"); aes_iv = (unsigned char*) clEnqueueMapBuffer(queue[gpu_id], pinned_aes_iv, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, 16 * gws, 0, NULL, NULL, &ret_code); HANDLE_CLERROR(ret_code, "Error mapping page-locked memory aes_iv"); memset(aes_iv, 0, 16 * gws); HANDLE_CLERROR(clSetKernelArg(RarInit, 0, sizeof(cl_mem), 
(void*)&cl_OutputBuf), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(RarInit, 1, sizeof(cl_mem), (void*)&cl_round), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(cl_mem), (void*)&cl_saved_key), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(cl_mem), (void*)&cl_round), "Error setting argument 2"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 3, sizeof(cl_mem), (void*)&cl_OutputBuf), "Error setting argument 3"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 4, sizeof(cl_mem), (void*)&cl_salt), "Error setting argument 4"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 5, sizeof(cl_mem), (void*)&cl_aes_iv), "Error setting argument 5"); HANDLE_CLERROR(clSetKernelArg(RarFinal, 0, sizeof(cl_mem), (void*)&cl_saved_len), "Error setting argument 0"); HANDLE_CLERROR(clSetKernelArg(RarFinal, 1, sizeof(cl_mem), (void*)&cl_OutputBuf), "Error setting argument 1"); HANDLE_CLERROR(clSetKernelArg(RarFinal, 2, sizeof(cl_mem), (void*)&cl_aes_key), "Error setting argument 2"); cracked = mem_alloc(sizeof(*cracked) * gws); } /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return MIN( MIN(autotune_get_task_max_work_group_size(FALSE, 0, RarInit), autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel)), autotune_get_task_max_work_group_size(FALSE, 0, RarFinal)); } static size_t get_task_max_size() { return 0; } static size_t get_default_workgroup() { if (cpu(device_info[gpu_id])) return 1; else return 64; } static void release_clobj(void) { HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_aes_key, aes_key, 0, NULL, NULL), "Error Unmapping aes_key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_aes_iv, aes_iv, 0, NULL, NULL), "Error Unmapping aes_iv"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_key, 
saved_key, 0, NULL, NULL), "Error Unmapping saved_key"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_saved_len, saved_len, 0, NULL, NULL), "Error Unmapping saved_len"); HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_salt, saved_salt, 0, NULL, NULL), "Error Unmapping saved_salt"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings"); HANDLE_CLERROR(clReleaseMemObject(cl_aes_key), "Release aes_key"); HANDLE_CLERROR(clReleaseMemObject(cl_aes_iv), "Release aes_iv"); HANDLE_CLERROR(clReleaseMemObject(cl_saved_key), "Release saved_key"); HANDLE_CLERROR(clReleaseMemObject(cl_saved_len), "Release saved_len"); HANDLE_CLERROR(clReleaseMemObject(cl_salt), "Release salt"); HANDLE_CLERROR(clReleaseMemObject(pinned_aes_key), "Release aes_key"); HANDLE_CLERROR(clReleaseMemObject(pinned_aes_iv), "Release aes_iv"); HANDLE_CLERROR(clReleaseMemObject(pinned_saved_key), "Release saved_key"); HANDLE_CLERROR(clReleaseMemObject(pinned_saved_len), "Release saved_len"); HANDLE_CLERROR(clReleaseMemObject(pinned_salt), "Release salt"); HANDLE_CLERROR(clReleaseMemObject(cl_OutputBuf), "Release OutputBuf"); MEM_FREE(cracked); } static void done(void) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(RarInit), "Release kernel"); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseKernel(RarFinal), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); MEM_FREE(unpack_data); } static void clear_keys(void) { memset(saved_len, 0, sizeof(int) * global_work_size); } #undef set_key static void set_key(char *key, int index) { int plen; UTF16 buf[PLAINTEXT_LENGTH + 1]; /* UTF-16LE encode the password, encoding aware */ plen = enc_to_utf16(buf, PLAINTEXT_LENGTH, (UTF8*) key, strlen(key)); if (plen < 0) plen = strlen16(buf); memcpy(&saved_key[UNICODE_LENGTH * index], buf, UNICODE_LENGTH); saved_len[index] = plen << 1; new_keys = 1; } static void *get_salt(char *ciphertext) { 
unsigned int i, type, ex_len; static unsigned char *ptr; /* extract data from "salt" */ char *encoded_salt; char *saltcopy = strdup(ciphertext); char *keep_ptr = saltcopy; rarfile *psalt; unsigned char tmp_salt[8]; int inlined = 1; SHA_CTX ctx; if (!ptr) ptr = mem_alloc_tiny(sizeof(rarfile*),sizeof(rarfile*)); saltcopy += 7; /* skip over "$RAR3$*" */ type = atoi(strtok(saltcopy, "*")); encoded_salt = strtok(NULL, "*"); for (i = 0; i < 8; i++) tmp_salt[i] = atoi16[ARCH_INDEX(encoded_salt[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_salt[i * 2 + 1])]; if (type == 0) { /* rar-hp mode */ char *encoded_ct = strtok(NULL, "*"); psalt = mem_calloc(sizeof(*psalt)+16); psalt->type = type; ex_len = 16; memcpy(psalt->salt, tmp_salt, 8); for (i = 0; i < 16; i++) psalt->raw_data[i] = atoi16[ARCH_INDEX(encoded_ct[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_ct[i * 2 + 1])]; psalt->blob = psalt->raw_data; psalt->pack_size = 16; } else { char *p = strtok(NULL, "*"); char crc_c[4]; unsigned long long pack_size; unsigned long long unp_size; for (i = 0; i < 4; i++) crc_c[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; pack_size = atoll(strtok(NULL, "*")); unp_size = atoll(strtok(NULL, "*")); inlined = atoi(strtok(NULL, "*")); ex_len = pack_size; /* load ciphertext. We allocate and load all files here, and they are freed when password found. */ #if HAVE_MMAP psalt = mem_calloc(sizeof(*psalt) + (inlined ? 
ex_len : 0)); #else psalt = mem_calloc(sizeof(*psalt)+ex_len); #endif psalt->type = type; memcpy(psalt->salt, tmp_salt, 8); psalt->pack_size = pack_size; psalt->unp_size = unp_size; memcpy(psalt->crc.c, crc_c, 4); if (inlined) { unsigned char *d = psalt->raw_data; p = strtok(NULL, "*"); for (i = 0; i < psalt->pack_size; i++) *d++ = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; psalt->blob = psalt->raw_data; } else { FILE *fp; char *archive_name = strtok(NULL, "*"); long long pos = atoll(strtok(NULL, "*")); #if HAVE_MMAP if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno)); error(); } #ifdef RAR_DEBUG fprintf(stderr, "RAR mmap() len %llu offset 0\n", pos + psalt->pack_size); #endif psalt->blob = mmap(NULL, pos + psalt->pack_size, PROT_READ, MAP_SHARED, fileno(fp), 0); if (psalt->blob == MAP_FAILED) { fprintf(stderr, "Error loading file from " "archive '%s'. Archive possibly " "damaged.\n", archive_name); error(); } psalt->blob += pos; #else size_t count; if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno)); error(); } jtr_fseek64(fp, pos, SEEK_SET); count = fread(psalt->raw_data, 1, psalt->pack_size, fp); if (count != psalt->pack_size) { fprintf(stderr, "Error loading file from archive '%s', expected %llu bytes, got %zu. 
Archive possibly damaged.\n", archive_name, psalt->pack_size, count); error(); } psalt->blob = psalt->raw_data; #endif fclose(fp); } p = strtok(NULL, "*"); psalt->method = atoi16[ARCH_INDEX(p[0])] * 16 + atoi16[ARCH_INDEX(p[1])]; if (psalt->method != 0x30) #if ARCH_LITTLE_ENDIAN psalt->crc.w = ~psalt->crc.w; #else psalt->crc.w = JOHNSWAP(~psalt->crc.w); #endif } SHA1_Init(&ctx); SHA1_Update(&ctx, psalt->blob, psalt->pack_size); SHA1_Final(psalt->blob_hash, &ctx); MEM_FREE(keep_ptr); #if HAVE_MMAP psalt->dsalt.salt_alloc_needs_free = inlined; #else psalt->dsalt.salt_alloc_needs_free = 1; #endif psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(rarfile, salt); psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(rarfile, salt, raw_data, 0); memcpy(ptr, &psalt, sizeof(rarfile*)); return (void*)ptr; } static void set_salt(void *salt) { cur_file = *((rarfile**)salt); memcpy(saved_salt, cur_file->salt, 8); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, 8, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt"); } static void init(struct fmt_main *self) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DPLAINTEXT_LENGTH=%u", PLAINTEXT_LENGTH); opencl_init("$JOHN/kernels/rar_kernel.cl", gpu_id, build_opts); // create kernels to execute RarInit = clCreateKernel(program[gpu_id], "RarInit", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); crypt_kernel = clCreateKernel(program[gpu_id], "RarHashLoop", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. Double-check kernel name?"); RarFinal = clCreateKernel(program[gpu_id], "RarFinal", &ret_code); HANDLE_CLERROR(ret_code, "Error creating kernel. 
Double-check kernel name?"); #ifdef DEBUG self->params.benchmark_comment = " (1-16 characters)"; #endif /* We mimic the lengths of cRARk for comparisons */ if (!cpu(device_info[gpu_id])) { #ifndef DEBUG self->params.benchmark_comment = " (length 5)"; #endif self->params.tests = gpu_tests; } //Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 3, self, create_clobj, release_clobj, UNICODE_LENGTH + sizeof(cl_int) * 14, 0); //Auto tune execution from shared/included code. self->methods.crypt_all = crypt_all_benchmark; autotune_run(self, ITERATIONS, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL)); self->methods.crypt_all = crypt_all; #if defined (_OPENMP) omp_t = omp_get_max_threads(); init_locks(); #endif /* _OPENMP */ if (pers_opts.target_enc == UTF_8) self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH); unpack_data = mem_calloc(sizeof(unpack_data_t) * omp_t); /* OpenSSL init */ init_aesni(); SSL_load_error_strings(); SSL_library_init(); OpenSSL_add_all_algorithms(); #ifndef __APPLE__ atexit(openssl_cleanup); #endif /* CRC-32 table init, do it before we start multithreading */ { CRC32_t crc; CRC32_Init(&crc); } } static int hexlen(char *q) { char *s = q; size_t len = strlen(q); while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return (len == (size_t)(q - s)) ? 
(int)(q - s) : -1 - (int)(q - s); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *ptr, *keeptr; int mode; if (strncmp(ciphertext, "$RAR3$*", 7)) return 0; if (!(ctcopy = strdup(ciphertext))) { fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL); return 0; } keeptr = ctcopy; ctcopy += 7; if (!(ptr = strtok(ctcopy, "*"))) /* -p or -h mode */ goto error; if (hexlen(ptr) != 1) goto error; mode = atoi(ptr); if (mode < 0 || mode > 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* salt */ goto error; if (hexlen(ptr) != 16) /* 8 bytes of salt */ goto error; if (!(ptr = strtok(NULL, "*"))) goto error; if (mode == 0) { if (hexlen(ptr) != 32) /* 16 bytes of encrypted known plain */ goto error; MEM_FREE(keeptr); return 1; } else { int inlined; long long plen, ulen; if (hexlen(ptr) != 8) /* 4 bytes of CRC */ goto error; if (!(ptr = strtok(NULL, "*"))) /* pack_size */ goto error; if (strlen(ptr) > 12) { // pack_size > 1 TB? Really? fprintf(stderr, "pack_size > 1TB not supported (%s)\n", FORMAT_NAME); goto error; } if ((plen = atoll(ptr)) < 16) goto error; if (!(ptr = strtok(NULL, "*"))) /* unp_size */ goto error; if (strlen(ptr) > 12) { fprintf(stderr, "unp_size > 1TB not supported (%s)\n", FORMAT_NAME); goto error; } if ((ulen = atoll(ptr)) < 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* inlined */ goto error; if (hexlen(ptr) != 1) goto error; inlined = atoi(ptr); if (inlined < 0 || inlined > 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* pack_size / archive_name */ goto error; if (inlined) { if (hexlen(ptr) != plen * 2) goto error; } else { FILE *fp; char *archive_name; archive_name = ptr; if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! 
%s: %s, skipping.\n", archive_name, strerror(errno)); goto error; } if (!(ptr = strtok(NULL, "*"))) /* pos */ goto error; /* We could go on and actually try seeking to pos but this is enough for now */ fclose(fp); } if (!(ptr = strtok(NULL, "*"))) /* method */ goto error; } MEM_FREE(keeptr); return 1; error: #ifdef RAR_DEBUG { char buf[68]; strnzcpy(buf, ciphertext, sizeof(buf)); fprintf(stderr, "rejecting %s\n", buf); } #endif MEM_FREE(keeptr); return 0; } static char *get_key(int index) { UTF16 tmpbuf[PLAINTEXT_LENGTH + 1]; memcpy(tmpbuf, &((UTF16*) saved_key)[index * PLAINTEXT_LENGTH], saved_len[index]); memset(&tmpbuf[saved_len[index] >> 1], 0, 2); return (char*) utf16_to_enc(tmpbuf); } #define ADD_BITS(n) \ { \ if (bits < 9) { \ hold |= ((unsigned int)*next++ << (24 - bits)); \ bits += 8; \ } \ hold <<= n; \ bits -= n; \ } /* * This function is loosely based on JimF's check_inflate_CODE2() from * pkzip_fmt. Together with the other bit-checks, we are rejecting over 96% * of the candidates without resorting to a slow full check (which in turn * may reject semi-early, especially if it's a PPM block) * * Input is first 16 bytes of RAR buffer decrypted, as-is. It also contain the * first 2 bits, which have already been decoded, and have told us we had an * LZ block (RAR always use dynamic Huffman table) and keepOldTable was not set. * * RAR use 20 x (4 bits length, optionally 4 bits zerocount), and reversed * byte order. 
*/ static MAYBE_INLINE int check_huffman(unsigned char *next) { unsigned int bits, hold, i; int left; unsigned int ncount[4]; unsigned char *count = (unsigned char*)ncount; unsigned char bit_length[20]; #ifdef DEBUG unsigned char *was = next; #endif #if ARCH_LITTLE_ENDIAN && ARCH_ALLOWS_UNALIGNED hold = JOHNSWAP(*(unsigned int*)next); #else hold = next[3] + (((unsigned int)next[2]) << 8) + (((unsigned int)next[1]) << 16) + (((unsigned int)next[0]) << 24); #endif next += 4; // we already have the first 32 bits hold <<= 2; // we already processed 2 bits, PPM and keepOldTable bits = 32 - 2; /* First, read 20 pairs of (bitlength[, zerocount]) */ for (i = 0 ; i < 20 ; i++) { int length, zero_count; length = hold >> 28; ADD_BITS(4); if (length == 15) { zero_count = hold >> 28; ADD_BITS(4); if (zero_count == 0) { bit_length[i] = 15; } else { zero_count += 2; while (zero_count-- > 0 && i < sizeof(bit_length) / sizeof(bit_length[0])) bit_length[i++] = 0; i--; } } else { bit_length[i] = length; } } #ifdef DEBUG if (next - was > 16) { fprintf(stderr, "*** (possible) BUG: check_huffman() needed %u bytes, we only have 16 (bits=%d, hold=0x%08x)\n", (int)(next - was), bits, hold); dump_stuff_msg("complete buffer", was, 16); error(); } #endif /* Count the number of codes for each code length */ memset(count, 0, 16); for (i = 0; i < 20; i++) { ++count[bit_length[i]]; } count[0] = 0; if (!ncount[0] && !ncount[1] && !ncount[2] && !ncount[3]) return 0; /* No codes at all */ left = 1; for (i = 1; i < 16; ++i) { left <<= 1; left -= count[i]; if (left < 0) { return 0; /* over-subscribed */ } } if (left) { return 0; /* incomplete set */ } return 1; /* Passed this check! */ } static int crypt_all_benchmark(int *pcount, struct db_salt *salt) { int count = *pcount; size_t *lws = local_work_size ? 
&local_work_size : NULL; size_t gws = GET_MULTIPLE_OR_BIGGER(count, local_work_size); BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * gws, saved_key, 0, NULL, multi_profilingEvent[0]), "failed in clEnqueueWriteBuffer saved_key"); BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * gws, saved_len, 0, NULL, multi_profilingEvent[1]), "failed in clEnqueueWriteBuffer saved_len"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarInit, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[2]), "failed in clEnqueueNDRangeKernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[3]), "failed in clEnqueueNDRangeKernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarFinal, 1, NULL, &gws, lws, 0, NULL, multi_profilingEvent[4]), "failed in clEnqueueNDRangeKernel"); // read back aes key & iv BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_key, CL_FALSE, 0, 16 * gws, aes_key, 0, NULL, multi_profilingEvent[5]), "failed in reading key back"); BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_iv, CL_TRUE, 0, 16 * gws, aes_iv, 0, NULL, multi_profilingEvent[6]), "failed in reading iv back"); return count; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; int k; size_t gws = ((count + (local_work_size - 1)) / local_work_size) * local_work_size; if (new_keys) { HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_key, CL_FALSE, 0, UNICODE_LENGTH * gws, saved_key, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_key"); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_saved_len, CL_FALSE, 0, sizeof(int) * gws, saved_len, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_len"); new_keys = 0; } 
HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarInit, 1, NULL, &gws, &local_work_size, 0, NULL, firstEvent), "failed in clEnqueueNDRangeKernel"); for (k = 0; k < 16; k++) { HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &gws, &local_work_size, 0, NULL, NULL), "failed in clEnqueueNDRangeKernel"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); opencl_process_event(); } HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], RarFinal, 1, NULL, &gws, &local_work_size, 0, NULL, lastEvent), "failed in clEnqueueNDRangeKernel"); // read back aes key & iv HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_key, CL_FALSE, 0, 16 * gws, aes_key, 0, NULL, NULL), "failed in reading key back"); HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], cl_aes_iv, CL_TRUE, 0, 16 * gws, aes_iv, 0, NULL, NULL), "failed in reading iv back"); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { int i16 = index*16; unsigned int inlen = 16; int outlen; EVP_CIPHER_CTX aes_ctx; EVP_CIPHER_CTX_init(&aes_ctx); EVP_DecryptInit_ex(&aes_ctx, EVP_aes_128_cbc(), NULL, &aes_key[i16], &aes_iv[i16]); EVP_CIPHER_CTX_set_padding(&aes_ctx, 0); /* AES decrypt, uses aes_iv, aes_key and blob */ if (cur_file->type == 0) { /* rar-hp mode */ unsigned char plain[16]; outlen = 0; EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cur_file->blob, inlen); EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen], &outlen); cracked[index] = !memcmp(plain, "\xc4\x3d\x7b\x00\x40\x07\x00", 7); } else { if (cur_file->method == 0x30) { /* stored, not deflated */ CRC32_t crc; unsigned char crc_out[4]; unsigned char plain[0x8010]; unsigned long long size = cur_file->unp_size; unsigned char *cipher = cur_file->blob; /* Use full decryption with CRC check. 
Compute CRC of the decompressed plaintext */ CRC32_Init(&crc); outlen = 0; while (size > 0x8000) { inlen = 0x8000; EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cipher, inlen); CRC32_Update(&crc, plain, outlen > size ? size : outlen); size -= outlen; cipher += inlen; } EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cipher, (size + 15) & ~0xf); EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen], &outlen); size += outlen; CRC32_Update(&crc, plain, size); CRC32_Final(crc_out, crc); /* Compare computed CRC with stored CRC */ cracked[index] = !memcmp(crc_out, &cur_file->crc.c, 4); } else { const int solid = 0; unpack_data_t *unpack_t; unsigned char plain[20]; cracked[index] = 0; /* Decrypt just one block for early rejection */ outlen = 0; EVP_DecryptUpdate(&aes_ctx, plain, &outlen, cur_file->blob, 16); EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen], &outlen); #if 1 /* Early rejection */ if (plain[0] & 0x80) { // PPM checks here. if (!(plain[0] & 0x20) || // Reset bit must be set (plain[1] & 0x80)) // MaxMB must be < 128 goto bailOut; } else { // LZ checks here. 
if ((plain[0] & 0x40) || // KeepOldTable can't be set !check_huffman(plain)) // Huffman table check goto bailOut; } #endif /* Reset stuff for full check */ EVP_DecryptInit_ex(&aes_ctx, EVP_aes_128_cbc(), NULL, &aes_key[i16], &aes_iv[i16]); EVP_CIPHER_CTX_set_padding(&aes_ctx, 0); #ifdef _OPENMP unpack_t = &unpack_data[omp_get_thread_num()]; #else unpack_t = unpack_data; #endif unpack_t->max_size = cur_file->unp_size; unpack_t->dest_unp_size = cur_file->unp_size; unpack_t->pack_size = cur_file->pack_size; unpack_t->iv = &aes_iv[i16]; unpack_t->ctx = &aes_ctx; unpack_t->key = &aes_key[i16]; if (rar_unpack29(cur_file->blob, solid, unpack_t)) cracked[index] = !memcmp(&unpack_t->unp_crc, &cur_file->crc.c, 4); bailOut:; } } EVP_CIPHER_CTX_cleanup(&aes_ctx); } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_ocl_rar = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif cpu_tests // Changed in init if GPU },{ init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_dyna_salt_hash, set_salt, set_key, get_key, clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
/* ===== pi_spmd_final.c ===== */
/*
 * NAME: PI SPMD final version without false sharing
 *
 * Numerically computes the integral of 4/(1+x*x) from 0 to 1.  The value
 * of this integral is pi -- which is great since it gives us an easy way
 * to check the answer.
 *
 * The program was parallelized using OpenMP and an SPMD algorithm.
 * The following OpenMP specific pieces were added:
 *
 *  (1) A line to include omp.h -- the include file that contains OpenMP's
 *      function prototypes and constants.
 *  (2) A pragma that tells OpenMP to create a team of threads, with the
 *      loop index i private to each thread.
 *  (3) Two function calls: one to get the thread ID (ranging from 0 to
 *      one less than the number of threads) and one returning the total
 *      number of threads.
 *  (4) A "single" construct so only one thread prints the number of
 *      threads.
 *  (5) A cyclic distribution of the loop: each thread runs from its ID,
 *      striding by the number of threads, accumulating into a PRIVATE
 *      scalar (partial_sum) -- this is what avoids the false sharing a
 *      shared sum[id] array would cause.
 *  (6) A "critical" construct so the private partial sums are combined
 *      into the single global sum one thread at a time.
 *
 * History: Written by Tim Mattson, 11/99.
 */
#include <stdio.h>
#include <omp.h>

#define MAX_THREADS 4

static long num_steps = 100000000; /* number of quadrature rectangles */
double step;                       /* width of one rectangle */

int main(void)
{
    int i, j;
    double pi, full_sum = 0.0;
    double start_time, run_time;

    step = 1.0 / (double) num_steps;

    /* Repeat the computation for team sizes 1..MAX_THREADS to show scaling. */
    for (j = 1; j <= MAX_THREADS; j++) {
        omp_set_num_threads(j);
        full_sum = 0.0;
        start_time = omp_get_wtime();

#pragma omp parallel private(i)
        {
            int id = omp_get_thread_num();
            int numthreads = omp_get_num_threads();
            double x;
            double partial_sum = 0.0; /* private scalar: no false sharing */

#pragma omp single
            printf(" num_threads = %d", numthreads);

            /* Cyclic (round-robin) distribution of the iterations. */
            for (i = id; i < num_steps; i += numthreads) {
                x = (i + 0.5) * step;
                partial_sum += 4.0 / (1.0 + x * x);
            }

            /* Fold each thread's private sum into the global total. */
#pragma omp critical
            full_sum += partial_sum;
        }

        pi = step * full_sum;
        run_time = omp_get_wtime() - start_time;
        printf("\n pi is %f in %f seconds %d threads \n ", pi, run_time, j);
    }
    return 0;
}
/* ===== lune.c ===== */
/*
 * Rasterizes the boundary and interior pixels of a "lune" -- the region of
 * the Hammer projection restricted to longitudes [-30, +30] degrees -- and
 * maps each pixel back to (lat, lon) and then into a (u, v) coordinate
 * system.  NOTE(review): the (u,v) formulas below look like the Tape & Tape
 * lune-to-rectangle mapping used in moment-tensor source-type plots --
 * confirm against the intended reference.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Container for the lune image geometry.
 * NOTE(review): this struct is never used in the visible code -- presumably
 * consumed by code elsewhere in the project. */
struct luneImage_struct
{
    double *xWesternLine; /*!< x locations defining the western line [nWesternLine] */
    double *yWesternLine; /*!< y locations defining the western line [nWesternLine] */
    double *xEasternLine; /*!< x locations defining the eastern line [nEasternLine] */
    double *yEasternLine; /*!< y locations defining the eastern line [nEasternLine] */
    double *uPts;         /*!< u locations in gridsearch */
    double *vPts;         /*!< v locations in gridsearch */
    double *xPts;         /*!< x points in lune image */
    double *yPts;         /*!< y points in lune image */
    int nWesternLine;     /*!< Number of points in western line */
    int nEasternLine;     /*!< Number of points in eastern line */
    int npts;             /*!< Number of points (pixels) in lune image */
    char pad[4];          /* explicit padding to an 8-byte multiple */
};

/* Forward Hammer projection: (lon, lat) -> (x, y).  Defined elsewhere. */
void projections_hammer_latLonToXY(const double lambda, const double phi,
                                   double *x, double *y);
/* Inverse Hammer projection: (x, y) -> (lon, lat).  Defined elsewhere. */
void projections_hammer_xyToLatLon(const double x, const double y,
                                   double *lambda, double *phi);
/* Solves for the latitude phi1 such that y(phi1) - y(phi0) = dy at fixed
 * longitude lam0 (see the doc block at the end of this file). */
static int findNextNorthPointForFixedLongitude(
    const double lam0, const double phi0,
    const double dy, double *phi1);
/* Analogous east-stepping solver at fixed latitude. */
static int findNextEastPointForFixedLatitude(
    const double lam0, const double phi0,
    const double dx, double *lam1);
#pragma omp declare simd
/* Linear map of x from interval [a,b] onto [c,d].  Defined elsewhere. */
static double transform(const double a, const double b,
                        const double c, const double d,
                        const double x);

int main()
{
    double dPhi, dLam, x, y, lambda, lambda1, phi, lamMin, lamMax, phiMin, phiMax;
    double *x0s, *y0s, dx, dy, dyref, phi1, u, umin, umax, v, vmin, vmax, xmin, xmax, xwidth, y0;
    double xpixel, ypixel;
    int ierr, ix, iy, nlat, nlon;
    int nyPixel = 122;          // recommend this is even
    int nxPixel = nyPixel/3.0;  // aspect ratio is 1:3
    /* Hammer x extent of the lune at lambda = +/- 30 degrees (precomputed). */
    const double xlow =-5.22104768880206e-01; //-sqrt(2.0);
    const double xhigh = 5.22104768880206e-01; //sqrt(2.0);
    /* Full Hammer y extent (poles). */
    const double ylow =-sqrt(2.0);
    const double yhigh = sqrt(2.0);
    /* Latitude limits nudged off +/- pi/2 to dodge a singularity. */
    phiMin =-M_PI_2 + 1.e-14; // numerical stability problem
    phiMax = M_PI_2 - 1.e-14; // numerical stability problem
    /* Longitude limits of the lune: +/- 30 degrees. */
    lamMin =-M_PI/6.0;
    lamMax = M_PI/6.0;
/*
 projections_hammer_latLonToXY(0, phiMin, &x, &y);
 printf("%16.14e %16.14e\n", x, y);
 projections_hammer_latLonToXY(0, phiMax, &x, &y);
 printf("%16.14e %16.14e\n", x, y);
 getchar();
*/
    nlon = nxPixel;
    nlat = nyPixel;
    dPhi = (phiMax - phiMin)/(double) (nlat - 1);
    dLam = (lamMax - lamMin)/(double) (nlon - 1);
    /* nlat+1 slots: the stepping loop below writes x0s[iy+1] up to iy=nlat-1.
     * NOTE(review): aligned_alloc results are not checked for NULL and are
     * never freed before return -- acceptable for a short-lived driver, but
     * worth fixing if this becomes library code. */
    x0s = (double *) aligned_alloc(64, (size_t) (nlat+1)*sizeof(double));
    y0s = (double *) aligned_alloc(64, (size_t) (nlat+1)*sizeof(double));
/*
 for (iy=0; iy<nlat; iy++)
 {
     for (ix=0; ix<nlon; ix++)
     {
         lambda = lamMin + (double) ix*dLam;
         phi = phiMin + (double) iy*dPhi;
         projections_hammer_latLonToXY(lambda, phi, &x, &y);
         //projections_hammer_xyToLatLon(x, y, &l, &p);
         //printf("%f %f %f\n", x, y, sqrt(x*x + y*y));
     }
     //printf("\n");
     if (iy < nlat - 1){findNextNorthPointForFixedLongitude(lamMin, phi, 0.1, &phi1);}
 }
 */
    // evaluate the latitudes on the left (western) side of the lune
    dy = (phiMax - phiMin)/(double) (nlat - 1);
    //dy = M_PI/(double) (nlat - 1); //(yylow
    //dy = 1.0/(double) (nlat - 1);
    //dy = 1.0/(double) (nyPixel - 1);
    //printf("%f\n", dy);
    /* Overwrite dy with one pixel's worth of Hammer-y: the y spacing that
     * maps one pixel step onto [ylow, yhigh].  (The value above is a
     * superseded experiment left for reference.) */
    dy = transform(0, (double) (nyPixel - 1), ylow, yhigh, 1.0) //phiMin, phiMax, 1.0)
       - transform(0, (double) (nyPixel - 1), ylow, yhigh, 0.0);//phiMin, phiMax, 0.0);
    //printf("%f\n", dy);
    //printf("%f\n", dy);
    //return 0;
    /* March north from phiMin along lambda = lamMin, choosing latitudes so
     * that consecutive points are dy apart in projected y. */
    phi = phiMin;
    projections_hammer_latLonToXY(lamMin, phiMin, &x0s[0], &y0s[0]);
    //printf("%f\n", phi);
    for (iy=0; iy<nlat; iy++)
    {
        ierr = findNextNorthPointForFixedLongitude(lamMin, phi, dy, &phi1);
        // out of space - draw the top point
        if (ierr == 1)
        {
            projections_hammer_latLonToXY(lamMin, phiMax, &x, &y);
            iy = iy + 1; /* NOTE(review): with the [iy+1] write below this
                            skips a slot -- confirm intended vs. the y>yhigh
                            branch which does not advance iy. */
            x0s[iy+1] = x;
            y0s[iy+1] = y;
            nlat = iy + 2; /* shrink nlat to the points actually written */
            break;
        }
        projections_hammer_latLonToXY(lamMin, phi1, &x, &y);
        if (y > yhigh) /* overshot the top of the map: clamp to the pole */
        {
            projections_hammer_latLonToXY(lamMin, phiMax, &x, &y);
            x0s[iy+1] = x;
            y0s[iy+1] = y;
            nlat = iy + 2;
            break;
        }
        x0s[iy+1] = x;
        y0s[iy+1] = y;
        dyref = y0s[iy+1] - y0s[iy]; /* achieved spacing (diagnostic only) */
        //printf("%f\n", dyref);
        phi = phi1;
    }
    //getchar();
    // draw the other half (eastern line is the mirror image x -> -x)
    for (iy=0; iy<nlat; iy++)
    {
        //printf("%f %f\n", x0s[iy], y0s[iy]);
        //printf("%f %f\n", -x0s[iy], y0s[iy]);
        //projections_hammer_xyToLatLon(-x0s[iy], y0s[iy],
        //                              &lambda, &phi);
    }
    //xwidth = transform(-M_PI, M_PI, 0, (double) (nxPixel - 1), M_PI);
    // Evaluate the longitudes for each latitude: scan pixel columns between
    // the western boundary x0s[iy] and its mirror -x0s[iy].
    dx = 1.0; // pixel width is unity
    for (iy=0; iy<nlat; iy++)
    {
        y = y0s[iy];
        // transform x0 into the pixel domain
        //xmin = transform(-M_PI/6.0, M_PI/6.0, 0, (double) (nxPixel - 1), x0s[iy]);
        //xmax = transform(-M_PI/6.0, M_PI/6.0, 0, (double) (nxPixel - 1),-x0s[iy]);
        xmin = transform(xlow, xhigh, 0, (double) (nxPixel - 1), x0s[iy]);
        xmax = transform(xlow, xhigh, 0, (double) (nxPixel - 1),-x0s[iy]);
        /* Truncate to whole pixels and shrink by one pixel each side so the
         * scan stays strictly inside the lune boundary. */
        xmin = (double) ((int) xmin) + dx;
        xmax = (double) ((int) xmax) - dx;
        nlon = (int) ((xmax - xmin)/dx + 0.5) + 1;
        //printf("%f\n", x);
        //getchar();
        for (ix=0; ix<nlon; ix++)
        {
            //if (x > xmax){break;}
            //xpixel = transform(-M_PI, M_PI, 0, (double) (nxPixel - 1), x);
            xpixel = xmin + (double) ix*dx;
            //ypixel = transform(-M_PI_2, M_PI_2, 0, (double) (nyPixel - 1), y);
            ypixel = transform(ylow, yhigh, 0, (double) (nyPixel - 1), y);
            xpixel = (double) ((int) xpixel);
            //ypixel = (double) ((int) ypixel);
            printf("%f %f %f\n", xpixel, ypixel, y0s[iy]);
            /* Map the pixel center back to Hammer (x, y) ... */
            //x = transform(0, (double) (nxPixel - 1),-M_PI/6.0, M_PI/6.0, xpixel);
            //y = transform(0, (double) (nyPixel - 1),-M_PI_2, M_PI_2, ypixel);
            x = transform(0, (double) (nxPixel - 1), xlow, xhigh, xpixel);
            y = transform(0, (double) (nyPixel - 1), ylow, yhigh, ypixel);
            //if (x >-x0s[iy]){break;}
            /* ... then to geographic coordinates ... */
            projections_hammer_xyToLatLon(x, y, &lambda, &phi);
            //if (lambda > 1.0){printf("%f %f %f %f\n", x, y, lambda, phi); getchar();}
            // ... and finally into (u, v) space.  NOTE(review): u and v are
            // computed but unused here -- presumably for later storage in
            // luneImage_struct.uPts/vPts; verify against the caller.
            u = 0.75*sin(M_PI_2 - phi) - 0.5*sin(2.0*(M_PI_2 - phi))
              + 0.0625*sin(4.0*(M_PI_2 - phi));
            v = 1.0/3.0*sin(3.0*lambda);
            //printf("%f %f\n", lambda, phi);
            //printf("%f %f\n", u, v);
        }
    }
    // evaluate the longitudes
    return 0;
    /* NOTE(review): everything below the return above is unreachable dead
     * code -- an earlier experiment stepping east at fixed latitude.  Kept
     * for reference; delete once the solver is validated. */
    dx = (lamMax - lamMin)/(double) (nlon - 1);
    lambda = lamMin;
    lambda = lamMin;
    phi = 0.5;
    dx = 0.1;
    printf("\n");
    for (ix=0; ix<101; ix++)
    {
        ierr = findNextEastPointForFixedLatitude(lambda, phi, dx, &lambda1);
        if (lambda > M_PI){break;}
        if (ierr == 1){break;}
        projections_hammer_latLonToXY(lambda1, phi, &x, &y);
        printf("%f %f\n", x, y); //lambda1, phi);
        lambda = lambda1;
    }
    umin =-M_PI_2;
    umax = M_PI_2;
    vmin =-M_PI/6.0;
    vmax = M_PI/6.0;
    return 0;
}
/*!
 * @brief Computes the next northern point such that y_1 = y_0 + dy maps to
 *        dy = y(phi_1) - y(phi_0) with unknown phi_1.  In effect this is
 *        solving:
 *
 *        dy = f(\phi_1) - f(\phi_0)
 *           = \frac{\sqrt{2} \sin \phi_1}
 *                  {\sqrt{1 + \cos \phi_1 \cos \frac{\lambda}{2}}
 *           - \frac{\sqrt{2} \sin \phi_0}
 *                  {\sqrt{1 + \cos \phi_0 \cos \frac{\lambda}{2}}}
 *
 *        Calling the second term on the right hand side c and rearranging
 *        has that
 *
 *        dy + f = \frac{\sqrt{2} \sin \phi_1}
 *                      {\sqrt{1 + \cos \phi_1 \cos \frac{\lambda}{2}}}
 *
 *        or, after squaring,
 *
 *        (dy + f)^2 = \frac{2 (1 - \cos^2 \phi_1)}
 *                          {1 + \cos \phi_1 \cos \frac{\lambda}{2}}
 *
 *        Rearranging in the form of a quadratic for \cos \phi_1 has that
 *
 *        0 = \cos^2 \phi_1
 *          + \frac{(dy + f)^2}{2} \cos \frac{\lambda}{2} \cos \phi_1
 *          + \frac{(dy + f)^2 - 2}{2}
 *
 *        After solving for the roots \pm x = \cos(\phi_1) all that
 *        remains is to compute \phi_1 = acos(x) where it would appear that
 *        either the positive or negative root can be chosen.
 *
 * @param[in] lam0     fixed longitude (radians)
 * @param[in] phi0     latitude (-pi/2, pi/2)
 *
 * @param[in] dy       desired grid spacing (radians)
 *
 * @param[out] phi1    new latitude so that y(phi_1) - y(phi_0) = dy
 *
 * @result -1 indicates that phi0 + dy is greater than pi/2.
 *          0 indicates success.
* 1 indicates a failure to find a suitable phi1 * * @author Ben Baker * * @copyright ISTI distributed under Apache 2 */ static int findNextNorthPointForFixedLongitude( const double lam0, const double phi0, const double dy, double *phi1) { double b, c, cosHalfLam0, det, dyf, dyf2, f, sqrtDet, x, y0, y1; const double sqrt2 = 1.4142135623730951; //------------------------------------------------------------------------// // // precompute some values to save a few cycles *phi1 = M_PI_2; // choose as the upper limit the most northern point cosHalfLam0 = cos(0.5*lam0); f = sqrt2*sin(phi0)/(sqrt(1.0 + cos(phi0)*cos(0.5*lam0))); dyf = dy + f; dyf2 = dyf*dyf; // set the b and c terms in the quadratic equation where a = 1 b = 0.5*dyf2*cosHalfLam0; c = 0.5*(dyf2 - 2.0); det = b*b - 4.0*c; // this means that \phi_1 + \phi_2 > pi/2 i.e. we're out of bounds // return with the maximum pi/2 as defind above if (fabs(det) < 1.e-10){det = 0.0;} if (det < 0.0){return 1;} sqrtDet = sqrt(det); *phi1 = acos(0.5*(-b + sqrtDet)); // choose the positive root // resolve the sign ambiguity generated when we squared phi_0 + f // so that phi_1 is > phi_0. i feel tighter bounds could be made. if (phi0 + dy < 0.0){*phi1 =-*phi1;} // verification step y0 = f;//projections_hammer_latLonToXY(lam0, phi0, &x, &y0); projections_hammer_latLonToXY(lam0, *phi1, &x, &y1); if (fabs(y1 - y0 - dy) > 1.e-10) { // try to resolve the sign ambiguity when crossing the equator *phi1 =-*phi1; projections_hammer_latLonToXY(lam0, *phi1, &x, &y1); if (fabs(y1 - y0 - dy) > 1.e-10) { printf("warning in computing phi_1: %f - %f = %f /= dy=%f\n", y1, y0, y1 - y0, dy); return -1; } } return 0; } //============================================================================// /*! * @brief Computes the next eastern point such that x_1 = x_0 + dx maps to * dx = x(\lambda_1) - x(\lambda_0) with unknown lambda_1. 
This amounts * to solving: * * dx = \frac{2 \sqrt{2} \cos \phi \sin \frac{\lambda_1}{2}} * {\sqrt{1 + \cos \phi \cos \frac{\lambda_1}{2} }} * - \frac{2 \sqrt{2} \cos \phi \sin \frac{\lambda_0}{2}} * {\sqrt{1 + \cos \phi \cos \frac{\lambda_0}{2} }} * * or * * dx + f = \frac{2 \sqrt{2} \cos \phi \sin \frac{\lambda_1}{2}} * {\sqrt{1 + \cos \phi \cos \frac{\lambda_1}{2} }} * * After squaring this becomes * * (dx + f)^2 = \frac{8 \cos^2 \phi (1 - \cos^2 \frac{\lambda_1}{2})} * {1 + \cos \phi \cos \frac{\lambda_1}}{2} } * * Using cos^2 + sin^2 = 1 and rearranging as a quadratic equation has * that * * 0 = \cos^2 \frac{\lambda_1}{2} * + \frac{(dx + f)^2}{8} \cos \phi \cos \frac{\lambda_1}{2} * + \frac{(dx + f)^2 - 8 \cos^2 \phi}{8} * * * */ static int findNextEastPointForFixedLatitude( const double lam0, const double phi0, const double dx, double *lam1) { double b, c, cosPhi, cosPhi2, den1, den2, det, dxf, dxf2, f, halfLam, sqrtDet, x0, x1, y; const double twoSqrt2 = 2.8284271247461903; const double eigth = 0.125; //------------------------------------------------------------------------// *lam1 = M_PI; // define the maximum point cosPhi = cos(phi0); cosPhi2 = cosPhi*cosPhi; den1 = 1.0/(8.0*cosPhi); den2 = 1.0/(8.0*cosPhi2); halfLam = 0.5*lam0; f = twoSqrt2*cosPhi*sin(halfLam)/sqrt(1.0 + cosPhi*cos(halfLam)); dxf = dx + f; dxf2 = dxf*dxf; // create the quadratic b = den1*dxf2; c = den2*dxf2 - 1.0; det = b*b - 4.0*c; // this corresponds to looping around the world if (fabs(det) < 1.e-10){det = 0.0;} if (det < 0.0){return 1;} sqrtDet = sqrt(det); *lam1 = 2.0*acos(0.5*(-b+sqrtDet)); // choose larger root if (lam0 + dx < 0.0){*lam1 =-*lam1;} // verification step x0 = f; //projections_hammer_latLonToXY( lam0, phi0, &x0, &y); projections_hammer_latLonToXY(*lam1, phi0, &x1, &y); if (fabs(x1 - f - dx) > 1.e-10) { *lam1 =-*lam1; projections_hammer_latLonToXY(*lam1, phi0, &x1, &y); // cycling around if (fabs(x1 - x0 - dx) > 1.e-10) { return 1; //printf("warning in computing lam_1: 
%f - %f = %f /= dy=%f\n ", // x1, x0, x1 - x0, dx); //return -1; } } return 0; } int lune_findLuneInterpolationPoints(const double umin, const double umax, const double vmin, const double vmax, const int nxPixel, const int nyPixel) { const char *fcnm = "lune_findLuneInterpolationPoints\0"; double dx, dy, lam0, phi0; int ix, iy; const double sqrt2 = 1.4142135623730951; if (nxPixel < 2 || nyPixel < 2) { if (nxPixel < 2){printf("%s: Insufficient number of x pixels\n", fcnm);} if (nyPixel < 2){printf("%s: Insufficient number of y pixels\n", fcnm);} return -1; } // Start at the south-western most point and work up the `west' projections_hammer_latLonToXY(vmin, umin, &lam0, &phi0); // iterate until the northern most point is reached (but not exceeded) dx = 1.0/(double) (nxPixel - 1); dy = 1.0/(double) (nyPixel - 1); if (umax <= umin || vmax <= vmin) { } // map from cartesian onto unit sphere for (iy=0; iy<nyPixel; iy++) { for (ix=0; ix<nxPixel; ix++) { /* x = (double) ix*dx; y = (double) iy*dy; // Get the coordinate r = sqrt(x*x + y*y + 1.0); theta = arccos(1.0/r); phi = atan2(y, x); */ } } return 0; } /*! * @brief Conversion of x and y to longitude and latitude in the Hammer * (equal-area) projection * * @param[in] x x location * @param[in] y y location * * @param[out] lambda longitude (radians) * @param[out] phi latitude (radians) * * @url https://en.wikipedia.org/wiki/Hammer_projection * * @author Ben Baker * * @copyright ISTI distributed under Apache 2 * */ void projections_hammer_xyToLatLon(const double x, const double y, double *lambda, double *phi) { double z; // Compute intermediate variable z = sqrt(1.0 - 0.0625*(x*x) - 0.25*(y*y)); // Compute longitude *lambda = 2.0*atan2(z*x, 4.0*(z*z) - 2.0); *phi = asin(z*y); return; } /*! * @brief Conversion of longitude and latitude to x and y in the * Hammer (equal-area) projection. * * @param[in] lambda longitude (radians). \f$ \lambda \in [-\pi, \pi ] \f$. * @param[in] phi latitude (radians). \f$ \phi \in [0, \pi] \f$. 
* * @param[out] x x corresponding to longitude. * \f$ x \in [-2 \sqrt{2}, 2 \sqrt{2}] \f$. * @param[out] y y corresponding to latitude. * \f$ y \in \left [-\sqrt{2], \sqrt{2} ] \right \f$. * * @url https://en.wikipedia.org/wiki/Hammer_projection * * @author Ben Baker * * @copyright ISTI distributed under Apache 2 * */ void projections_hammer_latLonToXY(const double lambda, const double phi, double *x, double *y) { double cosLam2, cosPhi, den, halfLam, sinLam2, sinPhi; const double sqrt2 = 1.4142135623730951; const double twoSqrt2 = 2.8284271247461903; halfLam = 0.5*lambda; cosPhi = cos(phi); sinPhi = sin(phi); sinLam2 = sin(halfLam); cosLam2 = cos(halfLam); den = 1.0/sqrt(1.0 + cosPhi*cosLam2); *x = twoSqrt2*cosPhi*sinLam2*den; *y = sqrt2*sinPhi*den; return; } //============================================================================// /*! * @brief Interpolate function values in rectilinear (v, u) space * * @param[in] nv number of v coordinates (> 1) * @param[in] v monotonic increasing v points which * correspond to longitudes [nv]. Note that * \f$ v \in \left [-\frac{1}{3}, \frac{1}{3} \right ] \f$. * @param[in] nu number of u coordinates (> 1) * @param[in] u monotonic increasing u points which * correspond to colatitudes [nu]. Note that * \f$ u \in \left [ 0, \frac{3 \pi}{4} \right ] \f$. * @param[in] f values at f(v,u). * @param[in] nInt number of interpolation points * @param[in] vInt colatitude-like interpolation points [nInt] * @param[in] uInt longitude-like interpolation points [nInt] * @param[out] fInt interpolated values of f at f(vInt, uInt). 
* * @author Ben Baker * * @copyright ISTI distributed under Apache 2 * */ int lune_interpolateInVUSpace(const int nv, const double *__restrict__ v, const int nu, double *__restrict__ u, const double *__restrict__ f, const int nInt, const double *__restrict__ vInt, const double *__restrict__ uInt, double *__restrict__ fInt) { // nothing to do if (nInt < 1){return 0;} // input errors if (nu < 2 || nv < 2 || u == NULL || v == NULL || vInt == NULL || uInt == NULL || fInt == NULL) { return -1; } // verify u and v are sorted return 0; } //============================================================================// /*! * @brief Convert from \f$ x \in [a,b] \f$ to \f$ \xi \in [c, d] \f$. * * @param[in] a lower value in from interval [a,b] s.t. \f$ a \le x \le b \f$ * @param[in] b upper bound in from interval [a,b] s.t. \f$ a \le x \le b \f$ * @param[in] c lower bound in to interval [c,d] s.t. \f$ c \le \xi \le d \f$ * @param[in] d upper bound in to interval [c,d] s.t. \f$ c \le \xi \le d \f$ * * @result corresponding transformed variable of x which now resides in the * new interval \f$ \xi \in [c, d]\f$. * * @author Ben Baker * * @copyright MIT */ #pragma omp declare simd static double transform(const double a, const double b, const double c, const double d, const double x) { double c1, c2, det, xi; det = 1.0/(b - a); c1 = det*(b*c - a*d); c2 = det*(d - c); xi = c1 + x*c2; return xi; }
GB_unaryop__minv_int64_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int64_fp32 // op(A') function: GB_tran__minv_int64_fp32 // C type: int64_t // A type: float // cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64) // unaryop: cij = GB_IMINV_SIGNED (aij, 64) #define GB_ATYPE \ float #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ int64_t z ; GB_CAST_SIGNED(z,x,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int64_fp32 ( int64_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t 
p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int64_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GrB_Matrix_exportHint.c
//------------------------------------------------------------------------------ // GrB_Matrix_exportHint: determine sizes of arrays for GrB_Matrix_export //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB_transpose.h" #define GB_FREE_ALL ; GrB_Info GrB_Matrix_exportHint // suggest the best export format ( GrB_Format *format, // export format GrB_Matrix A // matrix to export ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Matrix_exportHint (&format, A)") ; GB_BURBLE_START ("GrB_Matrix_exportHint") ; GB_RETURN_IF_NULL (format) ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; // finish any pending work since this can change the sparsity of A GB_MATRIX_WAIT (A) ; int sparsity = GB_sparsity (A) ; bool is_csc = A->is_csc ; //-------------------------------------------------------------------------- // determine format that requires the least amount of modification //-------------------------------------------------------------------------- switch (sparsity) { default: case GxB_SPARSE : // CSR and CSC formats are supported by GraphBLAS, so if the matrix // is sparse by-row or sparse by-column, then suggest CSR or CSC. // The matrix can be exported with no change at all. case GxB_BITMAP : // Bitmap is not supported as a GrB_Format. It cannot be exported // as full, in general, so select CSR or CSC. (*format) = is_csc ? GrB_CSC_FORMAT : GrB_CSR_FORMAT ; break ; case GxB_HYPERSPARSE : // Hypersparse is not supported as a GrB_Format. Expanding a huge // hypersparse matrix to sparse can be costly, so suggest COO. 
(*format) = GrB_COO_FORMAT ; break ; case GxB_FULL : // Full is not supported by GraphBLAS (*format) = is_csc ? GrB_CSC_FORMAT : GrB_CSR_FORMAT ; // if full was supported by GraphBLAS; // (*format) = is_csc ? GrB_DENSE_COL_FORMAT : GrB_DENSE_ROW_FORMAT ; break ; } GB_BURBLE_END ; #pragma omp flush return (GrB_SUCCESS) ; }
trmv_x_dia_n_hi_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*!
 * y := beta*y + alpha*A^T*x for a square upper-triangular (non-unit handled
 * via the stored main diagonal) matrix A in DIA format.
 *
 * Each OpenMP thread accumulates its diagonals' contributions into a private
 * dense buffer; the buffers are then reduced into y, which avoids write
 * conflicts between threads working on different diagonals.
 *
 * @param[in]     alpha  scalar applied to A^T*x
 * @param[in]     A      DIA matrix; must be square
 * @param[in]     x      input vector of length A->cols
 * @param[in]     beta   scalar applied to the incoming y
 * @param[in,out] y      output vector of length A->rows
 *
 * @result ALPHA_SPARSE_STATUS_SUCCESS on success;
 *         ALPHA_SPARSE_STATUS_INVALID_VALUE if A is not square;
 *         ALPHA_SPARSE_STATUS_ALLOC_FAILED if a work buffer cannot be
 *         allocated.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_DIA* A,
                                      const ALPHA_Number* x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number* y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    if (m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();

    // one zero-initialized private accumulator per thread
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
    if (tmp == NULL)
        return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        if (tmp[i] == NULL)
        {
            // release everything allocated so far before bailing out
            for (ALPHA_INT j = 0; j < i; ++j)
                alpha_free(tmp[j]);
            alpha_free(tmp);
            return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
        }
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }

    const ALPHA_INT diags = A->ndiag;

    // Transpose multiply: an entry A[row][col] on diagonal dis (col - row)
    // contributes A[row][col]*x[row] to y[col]; only the main diagonal
    // (dis == 0) and super-diagonals (dis > 0) of the upper triangle exist.
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];
        if (dis == 0)
        {
            const ALPHA_INT start = i * A->lval;
            for (ALPHA_INT j = 0; j < m; ++j)
            {
                ALPHA_Number v;
                alpha_mul(v, alpha, A->values[start + j]);
                alpha_madde(tmp[threadId][j], v, x[j]);
            }
        }
        else if (dis > 0)
        {
            const ALPHA_INT row_start = 0;
            const ALPHA_INT col_start = dis;
            const ALPHA_INT nnz = m - dis;
            const ALPHA_INT start = i * A->lval;
            for (ALPHA_INT j = 0; j < nnz; ++j)
            {
                ALPHA_Number v;
                alpha_mul(v, alpha, A->values[start + j]);
                alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
            }
        }
    }

    // y = beta*y + sum of all per-thread accumulators
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }

    // sequential cleanup: freeing a handful of buffers does not merit a
    // parallel region
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/*! Public entry point; dispatches to the OpenMP implementation. */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_DIA* A,
                           const ALPHA_Number* x,
                           const ALPHA_Number beta,
                           ALPHA_Number* y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
perftest.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "api/libperf.h" #include "lib/libperf_int.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/sys/sock.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <netdb.h> #include <getopt.h> #include <string.h> #include <sys/types.h> #include <sys/poll.h> #include <locale.h> #if defined (HAVE_MPI) # include <mpi.h> #elif defined (HAVE_RTE) # include<rte.h> #endif #define MAX_BATCH_FILES 32 #define MAX_CPUS 1024 #define TL_RESOURCE_NAME_NONE "<none>" #define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:" #define TEST_ID_UNDEFINED -1 enum { TEST_FLAG_PRINT_RESULTS = UCS_BIT(0), TEST_FLAG_PRINT_TEST = UCS_BIT(1), TEST_FLAG_SET_AFFINITY = UCS_BIT(8), TEST_FLAG_NUMERIC_FMT = UCS_BIT(9), TEST_FLAG_PRINT_FINAL = UCS_BIT(10), TEST_FLAG_PRINT_CSV = UCS_BIT(11) }; typedef struct sock_rte_group { int is_server; int connfd; } sock_rte_group_t; typedef struct test_type { const char *name; ucx_perf_api_t api; ucx_perf_cmd_t command; ucx_perf_test_type_t test_type; const char *desc; const char *overhead_lat; unsigned window_size; } test_type_t; typedef struct perftest_params { ucx_perf_params_t super; int test_id; } perftest_params_t; struct perftest_context { perftest_params_t params; const char *server_addr; int port; int mpi; unsigned num_cpus; unsigned cpus[MAX_CPUS]; unsigned flags; unsigned num_batch_files; char *batch_files[MAX_BATCH_FILES]; char *test_names[MAX_BATCH_FILES]; sock_rte_group_t sock_rte_group; }; test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, 
UCX_PERF_TEST_TYPE_PINGPONG, "active message latency", "latency", 1}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency", "latency", 1}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / rate", "latency", 1}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / rate", "latency", 1}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / rate", "latency", 1}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate", "overhead", 1}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate", "overhead", 1}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate", "overhead", 1}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG, "tag match latency", "latency", 1}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag match bandwidth", "overhead", 32}, {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG, "tag sync match latency", "latency", 1}, {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag sync match bandwidth", "overhead", 32}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth", "overhead", 32}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get 
latency / bandwidth / message rate", "latency", 1}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add bandwidth / message rate", "overhead", 1}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / bandwidth / rate", "latency", 1}, {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / bandwidth / rate", "latency", 1}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / bandwidth / rate", "latency", 1}, {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI, "stream bandwidth", "overhead", 1}, {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG, "stream latency", "latency", 1}, {NULL} }; static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int), int poll_events, void *data, size_t size, void (*progress)(void *arg), void *arg, const char *name) { size_t total = 0; struct pollfd pfd; int ret; while (total < size) { pfd.fd = sock; pfd.events = poll_events; pfd.revents = 0; ret = poll(&pfd, 1, 1); /* poll for 1ms */ if (ret > 0) { ucs_assert(ret == 1); ucs_assert(pfd.revents & poll_events); ret = sock_call(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("%s() failed: %m", name); return -1; } total += ret; } else if ((ret < 0) && (errno != EINTR)) { ucs_error("poll(fd=%d) failed: %m", sock); return -1; } /* progress user context */ if (progress != NULL) { progress(arg); } } return 0; } static int safe_send(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { typedef ssize_t (*sock_call)(int, void *, size_t, int); return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send"); } static int safe_recv(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { return sock_io(sock, recv, POLLIN, data, size, 
progress, arg, "recv"); } static void print_progress(char **test_names, unsigned num_names, const ucx_perf_result_t *result, unsigned flags, int final, int is_server, int is_multi_thread) { static const char *fmt_csv; static const char *fmt_numeric; static const char *fmt_plain; unsigned i; if (!(flags & TEST_FLAG_PRINT_RESULTS) || (!final && (flags & TEST_FLAG_PRINT_FINAL))) { return; } if (flags & TEST_FLAG_PRINT_CSV) { for (i = 0; i < num_names; ++i) { printf("%s,", test_names[i]); } } #if _OPENMP if (!final) { printf("[thread %d]", omp_get_thread_num()); } else if (flags & TEST_FLAG_PRINT_RESULTS) { printf("Final: "); } #endif if (is_multi_thread && final) { fmt_csv = "%4.0f,%.3f,%.2f,%.0f\n"; fmt_numeric = "%'18.0f %29.3f %22.2f %'24.0f\n"; fmt_plain = "%18.0f %29.3f %22.2f %23.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.total_average * 1000000.0, result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.total_average); } else { fmt_csv = "%4.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n"; fmt_numeric = "%'18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %'11.0f %'11.0f\n"; fmt_plain = "%18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %11.0f %11.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.typical * 1000000.0, result->latency.moment_average * 1000000.0, result->latency.total_average * 1000000.0, result->bandwidth.moment_average / (1024.0 * 1024.0), result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.moment_average, result->msgrate.total_average); } fflush(stdout); } static void print_header(struct perftest_context *ctx) { const char *overhead_lat_str; const char *test_data_str; const char *test_api_str; test_type_t *test; unsigned i; test = (ctx->params.test_id == TEST_ID_UNDEFINED) ? 
NULL : &tests[ctx->params.test_id]; if ((ctx->flags & TEST_FLAG_PRINT_TEST) && (test != NULL)) { if (test->api == UCX_PERF_API_UCT) { test_api_str = "transport layer"; switch (ctx->params.super.uct.data_layout) { case UCT_PERF_DATA_LAYOUT_SHORT: test_data_str = "short"; break; case UCT_PERF_DATA_LAYOUT_BCOPY: test_data_str = "bcopy"; break; case UCT_PERF_DATA_LAYOUT_ZCOPY: test_data_str = "zcopy"; break; default: test_data_str = "(undefined)"; break; } } else if (test->api == UCX_PERF_API_UCP) { test_api_str = "protocol layer"; test_data_str = "(automatic)"; /* TODO contig/stride/stream */ } else { return; } printf("+------------------------------------------------------------------------------------------+\n"); printf("| API: %-60s |\n", test_api_str); printf("| Test: %-60s |\n", test->desc); printf("| Data layout: %-60s |\n", test_data_str); printf("| Send memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.send_mem_type]); printf("| Recv memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.recv_mem_type]); printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params.super)); } if (ctx->flags & TEST_FLAG_PRINT_CSV) { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { for (i = 0; i < ctx->num_batch_files; ++i) { printf("%s,", ucs_basename(ctx->batch_files[i])); } printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n"); } } else { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { overhead_lat_str = (test == NULL) ? 
"overhead" : test->overhead_lat; printf("+--------------+--------------+-----------------------------+---------------------+-----------------------+\n"); printf("| | | %8s (usec) | bandwidth (MB/s) | message rate (msg/s) |\n", overhead_lat_str); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); printf("| Stage | # iterations | typical | average | overall | average | overall | average | overall |\n"); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); } else if (ctx->flags & TEST_FLAG_PRINT_TEST) { printf("+------------------------------------------------------------------------------------------+\n"); } } } static void print_test_name(struct perftest_context *ctx) { char buf[200]; unsigned i, pos; if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) { strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+"); pos = 1; for (i = 0; i < ctx->num_batch_files; ++i) { if (i != 0) { buf[pos++] = '/'; } memcpy(&buf[pos], ctx->test_names[i], ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1)); pos += strlen(ctx->test_names[i]); } if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("%s\n", buf); } } } static void print_memory_type_usage(void) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if (ucx_perf_mem_type_allocators[it] != NULL) { printf(" %s - %s\n", ucs_memory_type_names[it], ucs_memory_type_descs[it]); } } } static void usage(const struct perftest_context *ctx, const char *program) { static const char* api_names[] = { [UCX_PERF_API_UCT] = "UCT", [UCX_PERF_API_UCP] = "UCP" }; test_type_t *test; int UCS_V_UNUSED rank; #ifdef HAVE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (ctx->mpi && (rank != 0)) { return; } #endif #if defined (HAVE_MPI) printf(" Note: test can be also launched as an MPI application\n"); 
printf("\n"); #elif defined (HAVE_RTE) printf(" Note: this test can be also launched as an libRTE application\n"); printf("\n"); #endif printf(" Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); printf(" Common options:\n"); printf(" -t <test> test to run:\n"); for (test = tests; test->name; ++test) { printf(" %13s - %s %s\n", test->name, api_names[test->api], test->desc); } printf("\n"); printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n", ctx->params.super.msg_size_list[0]); printf(" for example: \"-s 16,48,8192,8192,14\"\n"); printf(" -m <send mem type>[,<recv mem type>]\n"); printf(" memory type of message for sender and receiver (host)\n"); print_memory_type_usage(); printf(" -n <iters> number of iterations to run (%ld)\n", ctx->params.super.max_iter); printf(" -w <iters> number of warm-up iterations (%zu)\n", ctx->params.super.warmup_iter); printf(" -c <cpulist> set affinity to this CPU list (separated by comma) (off)\n"); printf(" -O <count> maximal number of uncompleted outstanding sends\n"); printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n", ctx->params.super.iov_stride); printf(" -T <threads> number of threads in the test (%d)\n", ctx->params.super.thread_count); printf(" -o do not progress the responder in one-sided tests\n"); printf(" -B register memory with NONBLOCK flag\n"); printf(" -b <file> read and execute tests from a batch file: every line in the\n"); printf(" file is a test to run, first word is test name, the rest of\n"); printf(" the line is command-line arguments for the test.\n"); printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port); #ifdef HAVE_MPI printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h show this help message\n"); printf("\n"); printf(" Output format:\n"); printf(" -N use numeric formatting (thousands separator)\n"); printf(" -f print only final numbers\n"); printf(" -v print CSV-formatted output\n"); 
printf("\n"); printf(" UCT only:\n"); printf(" -d <device> device to use for testing\n"); printf(" -x <tl> transport to use for testing\n"); printf(" -D <layout> data layout for sender side:\n"); printf(" short - short messages (default, cannot be used for get)\n"); printf(" bcopy - copy-out (cannot be used for atomics)\n"); printf(" zcopy - zero-copy (cannot be used for atomics)\n"); printf(" iov - scatter-gather list (iovec)\n"); printf(" -W <count> flow control window size, for active messages (%u)\n", ctx->params.super.uct.fc_window); printf(" -H <size> active message header size (%zu)\n", ctx->params.super.am_hdr_size); printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n"); printf(" thread_spinlock - separate progress thread with spin locking\n"); printf(" thread_mutex - separate progress thread with mutex locking\n"); printf(" signal - signal-based timer\n"); printf("\n"); printf(" UCP only:\n"); printf(" -M <thread> thread support level for progress engine (single)\n"); printf(" single - only the master thread can access\n"); printf(" serialized - one thread can access at a time\n"); printf(" multi - multiple threads can access\n"); printf(" -D <layout>[,<layout>]\n"); printf(" data layout for sender and receiver side (contig)\n"); printf(" contig - Continuous datatype\n"); printf(" iov - Scatter-gather list\n"); printf(" -C use wild-card tag for tag tests\n"); printf(" -U force unexpected flow by using tag probe\n"); printf(" -r <mode> receive mode for stream tests (recv)\n"); printf(" recv : Use ucp_stream_recv_nb\n"); printf(" recv_data : Use ucp_stream_recv_data_nb\n"); printf("\n"); printf(" NOTE: When running UCP tests, transport and device should be specified by\n"); printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n"); printf("\n"); } static ucs_status_t parse_ucp_datatype_params(const char *opt_arg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const 
char *contig_type = "contig"; /* (tail of parse_ucp_datatype_params; the
                               * matching "const" qualifier and the "iov"
                               * name/size locals are on the previous chunk
                               * line) */
const size_t contig_type_size = strlen("contig");

/* Match the option argument against the two supported UCP datatype names */
if (0 == strncmp(opt_arg, iov_type, iov_type_size)) {
    *datatype = UCP_PERF_DATATYPE_IOV;
} else if (0 == strncmp(opt_arg, contig_type, contig_type_size)) {
    *datatype = UCP_PERF_DATATYPE_CONTIG;
} else {
    return UCS_ERR_INVALID_PARAM;
}
return UCS_OK;
}

/* Translate a memory type name (e.g. "host") to its ucs_memory_type_t value.
 * A name is accepted only if a perftest allocator for that memory type is
 * available (table entry is non-NULL, i.e. support was compiled in).
 * Returns UCS_OK on success, UCS_ERR_INVALID_PARAM (after logging) for
 * unknown or unsupported names. */
static ucs_status_t parse_mem_type(const char *opt_arg,
                                   ucs_memory_type_t *mem_type)
{
    ucs_memory_type_t it;

    for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) {
        if(!strcmp(opt_arg, ucs_memory_type_names[it]) &&
           (ucx_perf_mem_type_allocators[it] != NULL)) {
            *mem_type = it;
            return UCS_OK;
        }
    }
    ucs_error("Unsupported memory type: \"%s\"", opt_arg);
    return UCS_ERR_INVALID_PARAM;
}

/* Parse the -m argument: "<send-mem-type>[,<recv-mem-type>]".
 * If the receive type is omitted it defaults to the send type.
 * NOTE(review): strtok() modifies the string it scans and the const is cast
 * away here, so opt_arg must point to writable storage (argv memory is). */
static ucs_status_t parse_mem_type_params(const char *opt_arg,
                                          ucs_memory_type_t *send_mem_type,
                                          ucs_memory_type_t *recv_mem_type)
{
    const char *delim = ",";
    char *token = strtok((char*)opt_arg, delim);

    if (UCS_OK != parse_mem_type(token, send_mem_type)) {
        return UCS_ERR_INVALID_PARAM;
    }

    token = strtok(NULL, delim);
    if (NULL == token) {
        /* single value given: use the same memory type on both sides */
        *recv_mem_type = *send_mem_type;
        return UCS_OK;
    } else {
        return parse_mem_type(token, recv_mem_type);
    }
}

/* Parse the -s argument: comma-separated list of message sizes.
 * (Re)allocates params->msg_size_list to hold one entry per element. */
static ucs_status_t parse_message_sizes_params(const char *opt_arg,
                                               ucx_perf_params_t *params)
{
    const char delim = ',';
    size_t *msg_size_list, token_num, token_it;
    char *optarg_ptr, *optarg_ptr2;

    optarg_ptr = (char *)opt_arg;
    token_num = 0;
    /* count the number of given message sizes */
    while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) {
        ++optarg_ptr;
        ++token_num;
    }
    ++token_num; /* number of sizes == number of commas + 1 */

    msg_size_list = realloc(params->msg_size_list,
                            sizeof(*params->msg_size_list) * token_num);
    if (NULL == msg_size_list) {
        return UCS_ERR_NO_MEMORY;
    }
    params->msg_size_list = msg_size_list;

    optarg_ptr = (char *)opt_arg;
    errno = 0;
    for (token_it = 0; token_it < token_num; ++token_it) {
        params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10);
        /* strtoul error checks: range overflow... (the rest of this condition
         * continues on the next chunk line) */
        if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) ||
((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static ucs_status_t init_test_params(perftest_params_t *params) { memset(params, 0, sizeof(*params)); params->super.api = UCX_PERF_API_LAST; params->super.command = UCX_PERF_CMD_LAST; params->super.test_type = UCX_PERF_TEST_TYPE_LAST; params->super.thread_mode = UCS_THREAD_MODE_SINGLE; params->super.thread_count = 1; params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE; params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST; params->super.max_outstanding = 0; params->super.warmup_iter = 10000; params->super.am_hdr_size = 8; params->super.alignment = ucs_get_page_size(); params->super.max_iter = 1000000l; params->super.max_time = 0.0; params->super.report_interval = 1.0; params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE; params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->super.send_mem_type = UCS_MEMORY_TYPE_HOST; params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST; params->super.msg_size_cnt = 1; params->super.iov_stride = 0; params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE); params->super.msg_size_list = calloc(params->super.msg_size_cnt, sizeof(*params->super.msg_size_list)); if (params->super.msg_size_list == NULL) { return UCS_ERR_NO_MEMORY; } params->super.msg_size_list[0] = 8; params->test_id = TEST_ID_UNDEFINED; return UCS_OK; } static ucs_status_t parse_test_params(perftest_params_t *params, char opt, const char *opt_arg) { char *optarg2 = NULL; 
test_type_t *test; unsigned i; switch (opt) { case 'd': ucs_snprintf_zero(params->super.uct.dev_name, sizeof(params->super.uct.dev_name), "%s", opt_arg); return UCS_OK; case 'x': ucs_snprintf_zero(params->super.uct.tl_name, sizeof(params->super.uct.tl_name), "%s", opt_arg); return UCS_OK; case 't': for (i = 0; tests[i].name != NULL; ++i) { test = &tests[i]; if (!strcmp(opt_arg, test->name)) { params->super.api = test->api; params->super.command = test->command; params->super.test_type = test->test_type; params->test_id = i; break; } } if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (!strcmp(opt_arg, "short")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (!strcmp(opt_arg, "bcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (!strcmp(opt_arg, "zcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(opt_arg, &params->super.ucp.send_datatype)) { optarg2 = strchr(opt_arg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->super.ucp.recv_datatype)) { return UCS_ERR_INVALID_PARAM; } } } else { ucs_error("Invalid option argument for -D"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'i': params->super.iov_stride = atol(opt_arg); return UCS_OK; case 'n': params->super.max_iter = atol(opt_arg); return UCS_OK; case 's': return parse_message_sizes_params(opt_arg, &params->super); case 'H': params->super.am_hdr_size = atol(opt_arg); return UCS_OK; case 'W': params->super.uct.fc_window = atoi(opt_arg); return UCS_OK; case 'O': params->super.max_outstanding = atoi(opt_arg); return UCS_OK; case 'w': params->super.warmup_iter = atol(opt_arg); return UCS_OK; case 'o': params->super.flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->super.flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': 
params->super.flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'U': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE; return UCS_OK; case 'M': if (!strcmp(opt_arg, "single")) { params->super.thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (!strcmp(opt_arg, "serialized")) { params->super.thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (!strcmp(opt_arg, "multi")) { params->super.thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->super.thread_count = atoi(opt_arg); return UCS_OK; case 'A': if (!strcmp(opt_arg, "thread") || !strcmp(opt_arg, "thread_spinlock")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK; return UCS_OK; } else if (!strcmp(opt_arg, "thread_mutex")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_MUTEX; return UCS_OK; } else if (!strcmp(opt_arg, "signal")) { params->super.async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } case 'r': if (!strcmp(opt_arg, "recv_data")) { params->super.flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } else if (!strcmp(opt_arg, "recv")) { params->super.flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } return UCS_ERR_INVALID_PARAM; case 'm': if (UCS_OK != parse_mem_type_params(opt_arg, &params->super.send_mem_type, &params->super.recv_mem_type)) { return UCS_ERR_INVALID_PARAM; } return UCS_OK; default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t adjust_test_params(perftest_params_t *params, const char *error_prefix) { test_type_t *test; if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("%smissing test name", error_prefix); return UCS_ERR_INVALID_PARAM; } test = &tests[params->test_id]; if (params->super.max_outstanding == 0) { 
        /* (tail of adjust_test_params: the user did not set -O, so apply the
         * selected test's default outstanding-sends window) */
        params->super.max_outstanding = test->window_size;
    }

    return UCS_OK;
}

/* Read the next runnable line from a batch file (-b option) and parse it.
 * Each non-empty, non-comment ('#') line has the form
 * "<test-name> [options...]". On success *test_name_p receives a strdup()'d
 * copy of the test name (caller must free it) and *line_num is advanced past
 * every consumed line. Returns UCS_ERR_NO_ELEM at end of file. */
static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name,
                                    int *line_num, perftest_params_t *params,
                                    char** test_name_p)
{
#define MAX_SIZE 256
#define MAX_ARG_SIZE 2048
    ucs_status_t status;
    char buf[MAX_ARG_SIZE];
    char error_prefix[MAX_ARG_SIZE];
    int argc;
    char *argv[MAX_SIZE + 1];
    int c;
    char *p;

    do {
        /* NOTE(review): sizeof(buf) - 1 is stricter than needed; fgets
         * already reserves room for the terminating NUL */
        if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) {
            return UCS_ERR_NO_ELEM;
        }
        ++(*line_num);

        /* tokenize the line into a local argv[] suitable for getopt() */
        argc = 0;
        p = strtok(buf, " \t\n\r");
        while (p && (argc < MAX_SIZE)) {
            argv[argc++] = p;
            p = strtok(NULL, " \t\n\r");
        }
        argv[argc] = NULL;
    } while ((argc == 0) || (argv[0][0] == '#')); /* skip blank/comment lines */

    ucs_snprintf_safe(error_prefix, sizeof(error_prefix),
                      "in batch file '%s' line %d: ", file_name, *line_num);

    /* reset getopt's scanner state, then parse the per-test options */
    optind = 1;
    while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) {
        status = parse_test_params(params, c, optarg);
        if (status != UCS_OK) {
            ucs_error("%s-%c %s: %s", error_prefix, c, optarg,
                      ucs_status_string(status));
            return status;
        }
    }

    status = adjust_test_params(params, error_prefix);
    if (status != UCS_OK) {
        return status;
    }

    *test_name_p = strdup(argv[0]); /* ownership transfers to the caller */
    return UCS_OK;
}

/* Parse the -c argument: comma-separated list of CPU numbers to bind to.
 * Fills ctx->cpus[] / ctx->num_cpus; rejects negative CPU numbers and lists
 * longer than MAX_CPUS. */
static ucs_status_t parse_cpus(char *opt_arg, struct perftest_context *ctx)
{
    char *endptr, *cpu_list = opt_arg;
    int cpu;

    ctx->num_cpus = 0;
    cpu = strtol(cpu_list, &endptr, 10);
    while (((*endptr == ',') || (*endptr == '\0')) &&
           (ctx->num_cpus < MAX_CPUS)) {
        if (cpu < 0) {
            ucs_error("invalid cpu number detected: (%d)", cpu);
            return UCS_ERR_INVALID_PARAM;
        }

        ctx->cpus[ctx->num_cpus++] = cpu;
        if (*endptr == '\0') {
            break; /* whole list consumed */
        }

        cpu_list = endptr + 1; /* skip the comma */
        cpu = strtol(cpu_list, &endptr, 10);
    }

    /* loop exited on the MAX_CPUS bound while input remained */
    if (*endptr == ',') {
        ucs_error("number of listed cpus exceeds the maximum supported value (%d)",
                  MAX_CPUS);
        return UCS_ERR_INVALID_PARAM;
    }

    return UCS_OK;
}

/* Parse the full command line into ctx (body continues on the next chunk
 * line). */
static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized,
                               int argc, char **argv)
{
    ucs_status_t status;
    int c;

    ucs_trace_func("");
ucx_perf_global_init(); /* initialize memory types */ status = init_test_params(&ctx->params); if (status != UCS_OK) { return status; } ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; ctx->mpi = mpi_initialized; optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = optarg; } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; status = parse_cpus(optarg, ctx); if (status != UCS_OK) { return status; } break; case 'P': #ifdef HAVE_MPI ctx->mpi = atoi(optarg) && mpi_initialized; break; #endif case 'h': usage(ctx, ucs_basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, ucs_basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 
0 : 1; } static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned snc; snc = magic; safe_send(group->connfd, &snc, sizeof(unsigned), progress, arg); snc = 0; safe_recv(group->connfd, &snc, sizeof(unsigned), progress, arg); ucs_assert(snc == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size), NULL, NULL); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL, NULL); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size), NULL, NULL); ucs_assert_always(size <= max); safe_recv(group->connfd, buffer, size, NULL, NULL); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int 
sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; goto err; } if (ctx->server_addr == NULL) { optval = 1; status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (status != UCS_OK) { goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); /* release the memory for the list of the message sizes allocated * during the initialization of the default testing parameters */ free(ctx->params.super.msg_size_list); ctx->params.super.msg_size_list = NULL; ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } if (ctx->params.super.msg_size_cnt != 0) { ctx->params.super.msg_size_list = calloc(ctx->params.super.msg_size_cnt, sizeof(*ctx->params.super.msg_size_list)); if (NULL == ctx->params.super.msg_size_list) { status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } ret = safe_recv(connfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { 
ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ctx->params.super.msg_size_cnt != 0) { safe_send(sockfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = &ctx->sock_rte_group; ctx->params.super.rte = &sock_rte; ctx->params.super.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if defined (HAVE_MPI) static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { int group_size, my_rank, i; MPI_Request *reqs; int nreqs = 0; int dummy; int flag; #pragma omp barrier #pragma omp master { /* * Naive non-blocking barrier implementation over send/recv, to call user * progress while waiting for completion. * Not using MPI_Ibarrier to be compatible with MPI-1. 
*/ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); /* allocate maximal possible number of requests */ reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size); if (my_rank == 0) { /* root gathers "ping" from all other ranks */ for (i = 1; i < group_size; ++i) { MPI_Irecv(&dummy, 0, MPI_INT, i /* source */, 1 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } } else { /* every non-root rank sends "ping" and waits for "pong" */ MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */, MPI_COMM_WORLD); MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } /* Waiting for receive requests */ do { MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE); progress(arg); } while (!flag); if (my_rank == 0) { /* root sends "pong" to all ranks */ for (i = 1; i < group_size; ++i) { MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */, MPI_COMM_WORLD); } } } #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, 
int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } #elif defined (HAVE_RTE) static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; 
r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { ucs_trace_func(""); #if defined (HAVE_MPI) static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; int size, rank; MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = NULL; ctx->params.super.rte = &mpi_rte; ctx->params.super.report_arg = ctx; #elif defined (HAVE_RTE) ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; rte_group_t group; rte_init(NULL, NULL, &group); if (1 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } 
ctx->params.super.rte_group = group; ctx->params.super.rte = &ext_rte; ctx->params.super.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #ifdef HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { ucs_sys_cpuset_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { for (i = 0; i < ctx->num_cpus; i++) { if (ctx->cpus[i] >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } } for (i = 0; i < ctx->num_cpus; i++) { CPU_SET(ctx->cpus[i], &cpuset); } ret = ucs_sys_setaffinity(&cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = ucs_sys_getaffinity(&cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." " Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t clone_params(perftest_params_t *dest, const perftest_params_t *src) { size_t msg_size_list_size; *dest = *src; msg_size_list_size = dest->super.msg_size_cnt * sizeof(*dest->super.msg_size_list); dest->super.msg_size_list = malloc(msg_size_list_size); if (dest->super.msg_size_list == NULL) { return ((msg_size_list_size != 0) ? 
UCS_ERR_NO_MEMORY : UCS_OK); } memcpy(dest->super.msg_size_list, src->super.msg_size_list, msg_size_list_size); return UCS_OK; } static ucs_status_t run_test_recurs(struct perftest_context *ctx, const perftest_params_t *parent_params, unsigned depth) { perftest_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; int line_num; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (parent_params->super.api == UCX_PERF_API_UCP) { if (strcmp(parent_params->super.uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.dev_name); } if (strcmp(parent_params->super.uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.tl_name); } } if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(&parent_params->super, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } line_num = 0; do { status = clone_params(&params, parent_params); if (status != UCS_OK) { goto out; } status = read_batch_file(batch_file, ctx->batch_files[depth], &line_num, &params, &ctx->test_names[depth]); if (status == UCS_OK) { run_test_recurs(ctx, &params, depth + 1); free(ctx->test_names[depth]); ctx->test_names[depth] = NULL; } free(params.super.msg_size_list); params.super.msg_size_list = NULL; } while (status == UCS_OK); if (status == UCS_ERR_NO_ELEM) { status = UCS_OK; } out: fclose(batch_file); return status; } static ucs_status_t run_test(struct perftest_context *ctx) { const char *error_prefix; ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); /* no batch files, only command line params */ if (ctx->num_batch_files == 0) { error_prefix = (ctx->flags & TEST_FLAG_PRINT_RESULTS) ? 
"command line: " : ""; status = adjust_test_params(&ctx->params, error_prefix); if (status != UCS_OK) { return status; } } print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int mpi_initialized; int mpi_rte; int ret; #ifdef HAVE_MPI int provided; mpi_initialized = !isatty(0) && /* Using MPI_THREAD_FUNNELED since ucx_perftest supports * using multiple threads when only the main one makes * MPI calls (which is also suitable for a single threaded * run). * MPI_THREAD_FUNNELED: * The process may be multi-threaded, but only the main * thread will make MPI calls (all MPI calls are funneled * to the main thread). */ (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0); if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) { printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n", provided); ret = -1; goto out; } #else mpi_initialized = 0; #endif /* Parse command line */ status = parse_opts(&ctx, mpi_initialized, argc, argv); if (status != UCS_OK) { ret = (status == UCS_ERR_CANCELED) ? 0 : -127; goto out_msg_size_list; } #ifdef __COVERITY__ /* coverity[dont_call] */ mpi_rte = rand(); /* Shut up deadcode error */ #endif if (ctx.mpi) { mpi_rte = 1; } else { #ifdef HAVE_RTE mpi_rte = 1; #else mpi_rte = 0; #endif } status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Create RTE */ status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (mpi_rte) ? 
cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out_msg_size_list: free(ctx.params.super.msg_size_list); #if HAVE_MPI out: #endif if (mpi_initialized) { #ifdef HAVE_MPI MPI_Finalize(); #endif } return ret; }
14_vector_cross_product_size_n.c
/*
 Program : 14
 Author  : Debottam
 Topic   : Write a C program using OpenMP features to find the cross product
           of two vectors of size n each in constant time complexity.
           Each component C[i] is independent and is computed by its own
           loop iteration, so with >= N processors the whole product
           completes in O(1) parallel time:
               C[i] = A[(i+1)%N]*B[(i+2)%N] - A[(i+2)%N]*B[(i+1)%N]
*/
#include <stdio.h>
#include <omp.h>

#define N 3  /* the cross product is only defined for 3-component vectors */

int main(void)
{
    /* Fixed input vectors; C receives the result. (Removed an unused
     * variable D that the original declared but never read.) */
    int A[] = {3, -5, 4}, i;
    int B[] = {2, 6, 5}, C[N];

    /* One thread per available processor so all components run in parallel */
    int m = omp_get_num_procs();
    omp_set_num_threads(m);

    /* Each iteration writes a distinct C[i], so no synchronization needed */
#pragma omp parallel for shared(C) private(i)
    for (i = 0; i < N; i++) {
        C[i] = A[(i + 1) % N] * B[(i + 2) % N] - A[(i + 2) % N] * B[(i + 1) % N];
    }

    printf("Cross product, C = ");
    for (i = 0; i < N; i++)
        printf("%d\t", C[i]);
    printf("\n");
    return 0;
}
task-two.c
/* * task-two.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> #include <unistd.h> #define NUM_THREADS 2 int main(int argc, char *argv[]) { int var = 0; int i; #pragma omp parallel for num_threads(NUM_THREADS) shared(var) \ schedule(static, 1) for (i = 0; i < NUM_THREADS; i++) { #pragma omp task shared(var) if (0) // the task is inlined an executed locally { var++; } } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-two.c:30 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-two.c:30 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
coordinate_common.h
/*!
 * Copyright 2018 by Contributors
 * \author Rory Mitchell
 */
#pragma once
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include "xgboost/data.h"
#include "./param.h"
#include "../gbm/gblinear_model.h"
#include "../common/random.h"

namespace xgboost {
namespace linear {

// Parameters for the coordinate-descent linear updater.
struct CoordinateParam : public dmlc::Parameter<CoordinateParam> {
  int top_k;  // 0 means "use all features" (see describe() below)
  DMLC_DECLARE_PARAMETER(CoordinateParam) {
    DMLC_DECLARE_FIELD(top_k)
        .set_lower_bound(0)
        .set_default(0)
        .describe("The number of top features to select in 'thrifty' feature_selector. "
                  "The value of zero means using all the features.");
  }
};

/**
 * \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the
 * number of training instances.
 *
 * \param sum_grad The sum gradient.
 * \param sum_hess The sum hess.
 * \param w The weight.
 * \param reg_alpha Unnormalised L1 penalty.
 * \param reg_lambda Unnormalised L2 penalty.
 *
 * \return The weight update.
 */
inline double CoordinateDelta(double sum_grad, double sum_hess, double w,
                              double reg_alpha, double reg_lambda) {
  // No usable curvature: a Newton step would be unstable, so do nothing.
  if (sum_hess < 1e-5f) return 0.0f;
  // Fold the L2 (ridge) contribution into gradient and Hessian.
  const double sum_grad_l2 = sum_grad + reg_lambda * w;
  const double sum_hess_l2 = sum_hess + reg_lambda;
  // tmp is the would-be updated weight ignoring L1; its sign selects which
  // side of the L1 soft-threshold applies. The max/min clamp at -w keeps the
  // update from pushing the weight past zero.
  // NOTE(review): this matches the standard elastic-net soft-thresholding
  // update -- confirm against the updater's derivation.
  const double tmp = w - sum_grad_l2 / sum_hess_l2;
  if (tmp >= 0) {
    return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w);
  } else {
    return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w);
  }
}

/**
 * \brief Calculate update to bias.
 *
 * \param sum_grad The sum gradient.
 * \param sum_hess The sum hess.
 *
 * \return The weight update.
 */
inline double CoordinateDeltaBias(double sum_grad, double sum_hess) {
  // Plain (unregularised) Newton step for the intercept term.
  return -sum_grad / sum_hess;
}

/**
 * \brief Get the gradient with respect to a single feature.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param fidx The target feature.
 * \param gpair Gradients.
 * \param p_fmat The feature matrix.
* * \return The gradient and diagonal Hessian entry for a given feature. */ inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to a single feature. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param fidx The target feature. * \param gpair Gradients. * \param p_fmat The feature matrix. * * \return The gradient and diagonal Hessian entry for a given feature. */ inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx, const std::vector<GradientPair> &gpair, DMatrix *p_fmat) { double sum_grad = 0.0, sum_hess = 0.0; for (const auto &batch : p_fmat->GetColumnBatches()) { auto col = batch[fidx]; const auto ndata = static_cast<bst_omp_uint>(col.size()); #pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess) for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_float v = col[j].fvalue; auto &p = gpair[col[j].index * num_group + group_idx]; if (p.GetHess() < 0.0f) continue; sum_grad += p.GetGrad() * v; sum_hess += p.GetHess() * v * v; } } return std::make_pair(sum_grad, sum_hess); } /** * \brief Get the gradient with respect to the bias. Row-wise multithreaded. * * \param group_idx Zero-based index of the group. * \param num_group Number of groups. * \param gpair Gradients. * \param p_fmat The feature matrix. 
 *
 * \return The gradient and diagonal Hessian entry for the bias.
 */
inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group,
                                                         const std::vector<GradientPair> &gpair,
                                                         DMatrix *p_fmat) {
  double sum_grad = 0.0, sum_hess = 0.0;
  const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
  for (bst_omp_uint i = 0; i < ndata; ++i) {
    auto &p = gpair[i * num_group + group_idx];
    // Only instances with a non-negative Hessian participate; a negative
    // Hessian marks an instance to be ignored.
    if (p.GetHess() >= 0.0f) {
      sum_grad += p.GetGrad();
      sum_hess += p.GetHess();
    }
  }
  return std::make_pair(sum_grad, sum_hess);
}

/**
 * \brief Updates the gradient vector with respect to a change in weight.
 *
 * \param fidx      The feature index.
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param dw        The change in weight.
 * \param in_gpair  The gradient vector to be updated.
 * \param p_fmat    The input feature matrix.
 */
inline void UpdateResidualParallel(int fidx, int group_idx, int num_group, float dw,
                                   std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) {
  // No weight change means the residuals are already correct.
  if (dw == 0.0f) return;
  for (const auto &batch : p_fmat->GetColumnBatches()) {
    auto col = batch[fidx];
    // update grad value
    const auto num_row = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
    for (bst_omp_uint j = 0; j < num_row; ++j) {
      GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx];
      if (p.GetHess() < 0.0f) continue;
      // The first-order term changes by hess * x * dw; the Hessian itself is
      // unchanged (second component of the added pair is 0).
      p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0);
    }
  }
}

/**
 * \brief Updates the gradient vector based on a change in the bias.
 *
 * \param group_idx Zero-based index of the group.
 * \param num_group Number of groups.
 * \param dbias     The change in bias.
 * \param in_gpair  The gradient vector to be updated.
 * \param p_fmat    The input feature matrix.
 */
inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias,
                                       std::vector<GradientPair> *in_gpair, DMatrix *p_fmat) {
  if (dbias == 0.0f) return;
  const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
  for (bst_omp_uint i = 0; i < ndata; ++i) {
    GradientPair &g = (*in_gpair)[i * num_group + group_idx];
    if (g.GetHess() < 0.0f) continue;
    // Bias behaves as a feature whose value is 1 for every row.
    g += GradientPair(g.GetHess() * dbias, 0);
  }
}

/**
 * \brief Abstract class for stateful feature selection or ordering
 *        in coordinate descent algorithms.
 */
class FeatureSelector {
 public:
  /*! \brief factory method */
  static FeatureSelector *Create(int choice);
  /*! \brief virtual destructor */
  virtual ~FeatureSelector() = default;
  /**
   * \brief Setting up the selector state prior to looping through features.
   *
   * \param model  The model.
   * \param gpair  The gpair.
   * \param p_fmat The feature matrix.
   * \param alpha  Regularisation alpha.
   * \param lambda Regularisation lambda.
   * \param param  A parameter with algorithm-dependent use.
   */
  virtual void Setup(const gbm::GBLinearModel &model,
                     const std::vector<GradientPair> &gpair, DMatrix *p_fmat,
                     float alpha, float lambda, int param) {}
  /**
   * \brief Select next coordinate to update.
   *
   * \param iteration The iteration in a loop through features
   * \param model     The model.
   * \param group_idx Zero-based index of the group.
   * \param gpair     The gpair.
   * \param p_fmat    The feature matrix.
   * \param alpha     Regularisation alpha.
   * \param lambda    Regularisation lambda.
   *
   * \return The index of the selected feature. -1 indicates none selected.
   */
  virtual int NextFeature(int iteration, const gbm::GBLinearModel &model,
                          int group_idx, const std::vector<GradientPair> &gpair,
                          DMatrix *p_fmat, float alpha, float lambda) = 0;
};

/**
 * \brief Deterministic selection by cycling through features one at a time.
 */
class CyclicFeatureSelector : public FeatureSelector {
 public:
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // Walk features in index order, wrapping around each pass.
    return iteration % model.param.num_feature;
  }
};

/**
 * \brief Similar to Cyclic but with random feature shuffling prior to each update.
 * \note Its randomness is controllable by setting a random seed.
 */
class ShuffleFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair, DMatrix *p_fmat,
             float alpha, float lambda, int param) override {
    // Lazily build the identity permutation on first use, then reshuffle it
    // before every pass.
    // NOTE(review): std::iota lives in <numeric>, which is not in the visible
    // include list -- presumably pulled in transitively; confirm.
    if (feat_index_.size() == 0) {
      feat_index_.resize(model.param.num_feature);
      std::iota(feat_index_.begin(), feat_index_.end(), 0);
    }
    std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
  }

  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    return feat_index_[iteration % model.param.num_feature];
  }

 protected:
  // Current (shuffled) visit order of the features.
  std::vector<bst_uint> feat_index_;
};

/**
 * \brief A random (with replacement) coordinate selector.
 * \note Its randomness is controllable by setting a random seed.
 */
class RandomFeatureSelector : public FeatureSelector {
 public:
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    return common::GlobalRandom()() % model.param.num_feature;
  }
};

/**
 * \brief Select coordinate with the greatest gradient magnitude.
 * \note It has O(num_feature^2) complexity. It is fully deterministic.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup(). That would reduce the complexity to
 * O(num_feature*top_k).
 */
class GreedyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair, DMatrix *p_fmat,
             float alpha, float lambda, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    const bst_uint ngroup = model.param.num_output_group;
    // param <= 0 means "no top-k restriction".
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    if (counter_.size() == 0) {
      counter_.resize(ngroup);
      gpair_sums_.resize(model.param.num_feature * ngroup);
    }
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-K or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
    const int ngroup = model.param.num_output_group;
    const bst_omp_uint nfeat = model.param.num_feature;
    // Calculate univariate gradient sums (column-parallel; each thread owns a
    // distinct gpair_sums_ slot, so no reduction is needed).
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetColumnBatches()) {
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        auto &sums = gpair_sums_[group_idx * nfeat + i];
        for (bst_uint j = 0u; j < ndata; ++j) {
          const bst_float v = col[j].fvalue;
          auto &p = gpair[col[j].index * ngroup + group_idx];
          if (p.GetHess() < 0.f) continue;
          sums.first += p.GetGrad() * v;
          sums.second += p.GetHess() * v * v;
        }
      }
    }
    // Find a feature with the largest magnitude of weight change
    int best_fidx = 0;
    double best_weight_update = 0.0f;
    for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
      auto &s = gpair_sums_[group_idx * nfeat + fidx];
      float dw = std::abs(static_cast<bst_float>(
          CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
      if (dw > best_weight_update) {
        best_weight_update = dw;
        best_fidx = fidx;
      }
    }
    return best_fidx;
  }

 protected:
  // Maximum number of features to select per group (see Setup()).
  bst_uint top_k_;
  // Per-group count of selections made so far in the current pass.
  std::vector<bst_uint> counter_;
  // Per-(group, feature) accumulated (sum_grad, sum_hess).
  std::vector<std::pair<double, double>> gpair_sums_;
};

/**
 * \brief Thrifty, approximately-greedy feature selector.
 *
 * \note Prior to cyclic updates, reorders features in descending magnitude of
 * their univariate weight changes. This operation is multithreaded and is a
 * linear complexity approximation of the quadratic greedy selection.
 *
 * \note It allows restricting the selection to top_k features per group with
 * the largest magnitude of univariate weight change, by passing the top_k value
 * through the `param` argument of Setup().
 */
class ThriftyFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair, DMatrix *p_fmat,
             float alpha, float lambda, int param) override {
    top_k_ = static_cast<bst_uint>(param);
    if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
    const bst_uint ngroup = model.param.num_output_group;
    const bst_omp_uint nfeat = model.param.num_feature;
    if (deltaw_.size() == 0) {
      deltaw_.resize(nfeat * ngroup);
      sorted_idx_.resize(nfeat * ngroup);
      counter_.resize(ngroup);
      gpair_sums_.resize(nfeat * ngroup);
    }
    // Calculate univariate gradient sums
    std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
    for (const auto &batch : p_fmat->GetColumnBatches()) {
      // column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        const auto col = batch[i];
        const bst_uint ndata = col.size();
        for (bst_uint gid = 0u; gid < ngroup; ++gid) {
          auto &sums = gpair_sums_[gid * nfeat + i];
          for (bst_uint j = 0u; j < ndata; ++j) {
            const bst_float v = col[j].fvalue;
            auto &p = gpair[col[j].index * ngroup + gid];
            if (p.GetHess() < 0.f) continue;
            sums.first += p.GetGrad() * v;
            sums.second += p.GetHess() * v * v;
          }
        }
      }
    }
    // rank by descending weight magnitude within the groups
    std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
    std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
    bst_float *pdeltaw = &deltaw_[0];
    for (bst_uint gid = 0u; gid < ngroup; ++gid) {
      // Calculate univariate weight changes
      for (bst_omp_uint i = 0; i < nfeat; ++i) {
        auto ii = gid * nfeat + i;
        auto &s = gpair_sums_[ii];
        deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
            s.first, s.second, model[i][gid], alpha, lambda));
      }
      // sort in descending order of deltaw abs values
      auto start = sorted_idx_.begin() + gid * nfeat;
      std::sort(start, start + nfeat,
                [pdeltaw](size_t i, size_t j) {
                  return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
                });
      counter_[gid] = 0u;
    }
  }

  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // k-th selected feature for a group
    auto k = counter_[group_idx]++;
    // stop after either reaching top-N or going through all the features in a group
    if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
    // note that sorted_idx stores the "long" indices
    const size_t grp_offset = group_idx * model.param.num_feature;
    return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
  }

 protected:
  // Maximum number of features to select per group (see Setup()).
  bst_uint top_k_;
  // Univariate weight change per (group, feature).
  std::vector<bst_float> deltaw_;
  // Feature visit order; "long" indices, offset by group.
  std::vector<size_t> sorted_idx_;
  // Per-group count of selections made so far in the current pass.
  std::vector<bst_uint> counter_;
  // Per-(group, feature) accumulated (sum_grad, sum_hess).
  std::vector<std::pair<double, double>> gpair_sums_;
};

// Factory dispatching on the feature_selector enum declared in param.h.
inline FeatureSelector *FeatureSelector::Create(int choice) {
  switch (choice) {
    case kCyclic:
      return new CyclicFeatureSelector();
    case kShuffle:
      return new ShuffleFeatureSelector();
    case kThrifty:
      return new ThriftyFeatureSelector();
    case kGreedy:
      return new GreedyFeatureSelector();
    case kRandom:
      return new RandomFeatureSelector();
    default:
      LOG(FATAL) << "unknown coordinate selector: " << choice;
  }
  return nullptr;
}

}  // namespace linear
}  // namespace xgboost
EmbeddingBag.h
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                      *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.)
******************************************************************************/

/* Compile-time switches: each one selects a JITed libxsmm TPP kernel over the
   plain OpenMP/SIMD fallback implemented in the #else branches below. */
#define JIT_REDUCE_COLS_IDX
#define JIT_REPLICATE_COLS_VAR
#define JIT_SCALE

#if defined( JIT_REDUCE_COLS_IDX) || defined(JIT_REPLICATE_COLS_VAR) || defined(JIT_SCALE)
#include <libxsmm.h>
#endif

#include "utils.h"
#include "rtm.h"

/* Embedding-bag (sum-pooled embedding lookup) with M rows of dimension E.
   Bags are described CSR-style: bag n covers indices[offsets[n] .. end), where
   end is offsets[n+1] for all but the last bag and NS (total index count) for
   the last one.
   NOTE(review): the `(T(*)[*])` VLA-typed casts used throughout are a GNU
   extension, and the JIT paths dispatch LIBXSMM_DATATYPE_F32 kernels
   unconditionally -- presumably T is always float here; confirm. */
template <typename T>
class EmbeddingBagImpl {
 public:
  // M: number of embedding rows; E: embedding dimension.
  EmbeddingBagImpl(int M, int E) : M(M), E(E) {
    weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment);
  }

  ~EmbeddingBagImpl() {
    my_free(weight_);
    weight_ = 0;
  }

  // Randomly initialize the weight table in [low, high).
  void init(T low = -0.1, T high = 0.1) {
    init_random(M * E, weight_, low, high);
  }

#ifdef JIT_REDUCE_COLS_IDX
  // forward: output[n] = sum of the weight rows selected by bag n, computed
  // with a JITed indexed column-reduction kernel (one kernel call per bag).
  void forward(int N, int NS, const long *offsets, const long *indices, T *output_) {
    T(*__restrict weight)[E] = (T(*)[*])weight_;
    T(*__restrict output)[E] = (T(*)[*])output_;
    libxsmm_meltwfunction_reduce_cols_idx kernel;
    int _ld = E;
    kernel = libxsmm_dispatch_meltw_reduce_cols_idx(
        E, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32,
        (sizeof(long) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32);
#pragma omp parallel for
    for (int n = 0; n < N; n++) {
      libxsmm_meltw_reduce_cols_idx_param params;
      auto start = offsets[n];
      // Last bag runs to NS instead of offsets[n + 1].
      auto end = (n < N - 1 ? offsets[n + 1] : NS);
      params.n = end - start;
      params.ind_ptr = &indices[start];
      params.inp_ptr = weight;
      params.out_ptr = &output[n][0];
      kernel( &params );
    }
  }
#else
  // forward (fallback): zero each output row, then accumulate the selected
  // weight rows with SIMD loops.
  void forward(int N, int NS, const long *offsets, const long *indices, T *output_) {
    T(*__restrict weight)[E] = (T(*)[*])weight_;
    T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
    for (int n = 0; n < N; n++) {
      auto start = offsets[n];
      auto end = (n < N - 1 ? offsets[n + 1] : NS);
#pragma omp simd
      for (long v = 0; v < E; v++)
        output[n][v] = 0;
      for (long s = start; s < end; s++) {
        auto ind = indices[s];
#pragma omp simd
        for (long v = 0; v < E; v++) {
          output[n][v] += weight[ind][v];
        }
      }
    }
  }
#endif

#ifdef JIT_REPLICATE_COLS_VAR
  // backward: replicate the upstream gradient of bag n into one row of
  // values_ per index in the bag (sum pooling => gradient is broadcast),
  // using a JITed variable-count column-replication kernel.
  void backward(int N, int NS, const T *gradout_, const long *offsets,
                const long *indices, T *values_) {
    T(*__restrict gradout)[E] = (T(*)[*])gradout_;
    T(*__restrict values)[E] = (T(*)[*])values_;
    int _ld = E;
    libxsmm_meltwfunction_unary kernel = libxsmm_dispatch_meltw_unary(
        E, 0, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32,
        LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE,
        LIBXSMM_MELTW_TYPE_UNARY_REPLICATE_COL_VAR);
#pragma omp parallel for
    for (int n = 0; n < N; n++) {
      libxsmm_meltw_unary_param unary_param;
      auto start = offsets[n];
      auto end = (n < N - 1 ? offsets[n + 1] : NS);
      unsigned long long _N = end-start;
      unary_param.in.primary = (void*)&gradout[n][0];
      unary_param.out.primary = (void*)&values[start][0];
      unary_param.out.secondary = (void*)&_N;
      kernel(&unary_param);
    }
  }
#else
  // backward (fallback): plain copy loops, optionally with streaming stores.
  void backward(int N, int NS, const T *gradout_, const long *offsets,
                const long *indices, T *values_) {
    T(*__restrict gradout)[E] = (T(*)[*])gradout_;
    T(*__restrict values)[E] = (T(*)[*])values_;
#pragma omp parallel for
    for (int n = 0; n < N; n++) {
      auto start = offsets[n];
      auto end = (n < N - 1 ? offsets[n + 1] : NS);
      for (long s = start; s < end; s++) {
#pragma omp simd
#ifdef STREAMING_WRITES
#pragma vector nontemporal(values)
#endif
        for (long v = 0; v < E; v++)
          values[s][v] = gradout[n][v];
      }
    }
  }
#endif

#ifdef JIT_SCALE
  // update: weight[indices[i]] += lr * grads[i] for each sparse gradient row,
  // via a JITed broadcast-scalar MULADD kernel. Duplicate indices may target
  // the same row from different threads, so each kernel call runs inside a
  // transactional region (RTM with spin-lock fallback).
  void update(int NS, const T *grads_, const long *indices, float lr) {
    T(*__restrict weight)[E] = (T(*)[*])weight_;
    T(*__restrict grads)[E] = (T(*)[*])grads_;
    int _ld = E;
    libxsmm_meltwfunction_binary kernel = libxsmm_dispatch_meltw_binary(
        E, 1, &_ld, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32,
        LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_BINARY_BCAST_SCALAR_IN_0,
        LIBXSMM_MELTW_TYPE_BINARY_MULADD);
    SimpleSpinLock fallBackLock;
#pragma omp parallel for
    for (long i = 0; i < NS; i++) {
      libxsmm_meltw_binary_param binary_param;
      long ind = indices[i];
      binary_param.in0.primary = (void*)&lr;
      binary_param.in1.primary = (void*)&grads[i][0];
      binary_param.out.primary = (void*)&weight[ind][0];
      {
        TransactionScope guard(fallBackLock, 100, 0);
        kernel(&binary_param);
      }
    }
  }
#else
  // update (fallback): same scaled accumulate with an explicit SIMD loop,
  // still guarded transactionally against duplicate-index collisions.
  void update(int NS, const T *grads_, const long *indices, float lr) {
    T(*__restrict weight)[E] = (T(*)[*])weight_;
    T(*__restrict grads)[E] = (T(*)[*])grads_;
    SimpleSpinLock fallBackLock;
#pragma omp parallel for
    for (long i = 0; i < NS; i++) {
      long ind = indices[i];
      {
        TransactionScope guard(fallBackLock, 100, 0);
#pragma omp simd
        for (long v = 0; v < E; v++)
          weight[ind][v] += lr * grads[i][v];
      }
    }
  }
#endif

  // Embedding table, M x E, allocated in the constructor; owned by this object.
  T *weight_;
  int M;  // number of rows
  int E;  // embedding dimension
};

typedef EmbeddingBagImpl<FTyp> EmbeddingBag;
wand-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % W W AAA N N DDDD % % W W A A NN N D D % % W W W AAAAA N N N D D % % WW WW A A N NN D D % % W W A A N N DDDD % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickWand Wand View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickWand/studio.h" #include "MagickWand/MagickWand.h" #include "MagickWand/magick-wand-private.h" #include "MagickWand/wand.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define WandViewId "WandView" /* Typedef declarations. */ struct _WandView { size_t id; char name[MagickPathExtent], *description; RectangleInfo extent; MagickWand *wand; Image *image; CacheView *view; PixelWand ***pixel_wands; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneWandView() makes a copy of the specified wand view. 
%
%  The format of the CloneWandView method is:
%
%      WandView *CloneWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    NOTE(review): clone_view->pixel_wands is still NULL at this point (the
    memset above zeroed the struct and no thread-set array is allocated
    before this loop), so the assignments below write through a NULL
    pointer.  clone_view->wand is also never copied from wand_view.  Both
    look like latent defects -- confirm against the upstream
    AcquirePixelsThreadSet pattern used by NewWandView before changing.
  */
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

/*
  Destroy each per-thread PixelWand array (one slot per OpenMP thread), then
  the containing array itself.  number_wands is the per-thread wand count
  (the view's extent width).
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width);
  wand_view->image=DestroyImage(wand_view->image);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  /* Invalidate the signature so stale pointers trip the asserts above. */
  wand_view->signature=(~MagickWandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination wand view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
% % The callback signature is: % % MagickBooleanType DuplexTransferImageViewMethod(const WandView *source, % const WandView *duplex,WandView *destination,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferWandViewIterator method is: % % MagickBooleanType DuplexTransferWandViewIterator(WandView *source, % WandView *duplex,WandView *destination, % DuplexTransferWandViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o duplex: the duplex wand view. % % o destination: the destination wand view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source, WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer, void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == MagickWandSignature); if (transfer == (DuplexTransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register 
const Quantum *magick_restrict duplex_pixels, *magick_restrict pixels; register ssize_t x; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source->extent.width; x++) { PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]); pixels+=GetPixelChannels(source->image); } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) duplex->extent.width; x++) { PixelSetQuantumPixel(duplex->image,duplex_pixels, duplex->pixel_wands[id][x]); duplex_pixels+=GetPixelChannels(duplex->image); } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelSetQuantumPixel(destination->image,destination_pixels, destination->pixel_wands[id][x]); destination_pixels+=GetPixelChannels(destination->image); } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x], destination_pixels); destination_pixels+=GetPixelChannels(destination->image); } sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType 
proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewException() returns the severity, reason, and description of any % error that occurs when utilizing a wand view. % % The format of the GetWandViewException method is: % % char *GetWandViewException(const WandView *wand_view, % ExceptionType *severity) % % A description of each parameter follows: % % o wand_view: the pixel wand_view. % % o severity: the severity of the error is returned here. % */ WandExport char *GetWandViewException(const WandView *wand_view, ExceptionType *severity) { char *description; assert(wand_view != (const WandView *) NULL); assert(wand_view->signature == MagickWandSignature); if (wand_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name); assert(severity != (ExceptionType *) NULL); *severity=wand_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", wand_view->name); *description='\0'; if (wand_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( wand_view->exception->severity,wand_view->exception->reason), MagickPathExtent); if (wand_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MagickPathExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( wand_view->exception->severity,wand_view->exception->description), 
MagickPathExtent); (void) ConcatenateMagickString(description,")",MagickPathExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewExtent() returns the wand view extent. % % The format of the GetWandViewExtent method is: % % RectangleInfo GetWandViewExtent(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == MagickWandSignature); return(wand_view->extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewIterator() iterates over the wand view in parallel and calls % your get method for each scanline of the view. The pixel extent is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % The callback signature is: % % MagickBooleanType GetImageViewMethod(const WandView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetWandViewIterator method is: % % MagickBooleanType GetWandViewIterator(WandView *source, % GetWandViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o get: the get callback method. % % o context: the user defined context. 
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    register ssize_t
      x;

    /* a failure in any thread cancels the remaining rows */
    if (status == MagickFalse)
      continue;
    /*
      Virtual pixels permit the extent to fall outside the canvas; the row is
      read-only, so any pixel updates made by the callback are discarded.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* copy the row into this thread's private pixel wands */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand view pixel_wands.
%
%  The format of the GetWandViewPixels method is:
%
%      PixelWand *GetWandViewPixels(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  /* returns the pixel-wand row belonging to the calling OpenMP thread */
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w W a n d                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewWand() returns the magick wand associated with the wand view.
%
%  The format of the GetWandViewWand method is:
%
%      MagickWand *GetWandViewWand(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    length;

  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != MagickWandSignature)
    return(MagickFalse);
  /* the name is stamped with the WandViewId prefix by NewWandView() */
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  Allocate one row of pixel wands per OpenMP thread so iterator callbacks can
  run in parallel without sharing wands.  Returns NULL on allocation failure
  (DestroyPixelsThreadSet releases any rows already acquired).
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands));
  }
  return(pixel_wands);
}

WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* the wand must be set before the cache view is acquired from its images */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  /* default extent covers the whole canvas of the first image */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  wand_view->exception=exception;
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of an extent of
%      pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /*
    Set the wand BEFORE acquiring the cache view: the previous ordering
    dereferenced wand_view->wand->images while wand_view->wand was still
    NULL from the memset above (NewWandView() already uses this order).
  */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->exception=exception;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w D e s c r i p t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /*
    Release the previous description (NewWandView() installs a default
    "WandView" string) so repeated calls do not leak.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initiallly
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetWandViewIterator method is:
%
%      MagickBooleanType SetWandViewIterator(WandView *destination,
%        SetWandViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the wand view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == MagickWandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* writing pixels requires a DirectClass image */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    /* a failure in any thread cancels the remaining rows */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* the callback fills this thread's pixel wands ... */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* ... which are then copied into the authentic pixel row */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        pixels);
      pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r W a n d V i e w I t e r a t o r                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferWandViewIterator() iterates over two wand views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination wand view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const WandView *source,
%        WandView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferWandViewIterator method is:
%
%      MagickBooleanType TransferWandViewIterator(WandView *source,
%        WandView *destination,TransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType TransferWandViewIterator(WandView *source, WandView *destination,TransferWandViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == MagickWandSignature); if (transfer == (TransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict pixels; register ssize_t x; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source->extent.width; x++) { PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]); pixels+=GetPixelChannels(source->image); } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelSetQuantumPixel(destination->image,destination_pixels, destination->pixel_wands[id][x]); 
destination_pixels+=GetPixelChannels(destination->image); } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x], destination_pixels); destination_pixels+=GetPixelChannels(destination->image); } sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateWandViewIterator() iterates over the wand view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. 
%
%  The format of the UpdateWandViewIterator method is:
%
%      MagickBooleanType UpdateWandViewIterator(WandView *source,
%        UpdateWandViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* writing pixels requires a DirectClass image */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Re-acquire the row before copying the wand values back: the loop above
      advanced the pixel pointer one full row, so continuing from it wrote
      past the end of the row buffer.  This matches the read-back sequence
      used by TransferWandViewIterator().
    */
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
      pixels+=GetPixelChannels(source->image);
    }
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
AlloyArray.h
/* * Copyright(C) 2015, Blake C. Lucas, Ph.D. (img.science@gmail.com) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #ifndef INCLUDE_ALLOYARRAY_H_ #define INCLUDE_ALLOYARRAY_H_ #include "AlloyCommon.h" #include "cereal/cereal.hpp" #include "cereal/types/array.hpp" #include "cereal/types/string.hpp" namespace aly { template<class T, int C> struct Array: public std::array<T, C> { template<class Archive> void save(Archive & archive) const { archive(cereal::make_nvp(MakeString() << "array" << C, *this)); } template<class Archive> void load(Archive & archive) { archive(cereal::make_nvp(MakeString() << "array" << C, *this)); } void set(const T& val) { for (float& v : *this) { v = val; } } Array():std::array<T,C>() { } Array(const T& val) { set(val); } T max() const { T tmp(std::numeric_limits<T>::min()); for (int i = 0; i < C; i++) { if ((*this)[i] > tmp) tmp = (*this)[i]; } return tmp; } T min() const { T tmp(std::numeric_limits<T>::max()); for (int i = 0; i < C; i++) { if ((*this)[i] < tmp) tmp = (*this)[i]; } return tmp; } T mean() const { T tmp(0); for (int i = 0; i < C; i++) { tmp += (*this)[i]; } return tmp/T(C); } T median() const { std::vector<T> tmp(this->begin(),this->end()); std::sort(tmp.begin(), tmp.end()); if (C% 2 == 0) { return T(((double)tmp[C / 2]+ (double)tmp[C / 2 - 1])* 0.5f); } else { return tmp[C / 2]; } } T stdDev() const { if (C < 2) { return T(0); } T avg = mean(); double var(0.0); for (const T& val : *this) { double e = (double)(val - avg); var += e * e; } var = var / (double)(C - 1); return T(std::sqrt(var)); } }; template<class T, int C> void Transform(Array<T, C>& im1, Array<T, C>& im2, const std::function<void(T&, T&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Array dimensions do not match. 
" << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (size_t offset = 0; offset < sz; offset++) { func(im1[offset], im2[offset]); } } template<class T, int C> void Transform(Array<T, C>& im1, const std::function<void(T&)>& func) { size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1[offset]); } } template<class T, int C> void Transform(Array<T, C>& im1, const Array<T, C>& im2, const std::function<void(T&, const T&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Array dimensions do not match. " << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1[offset], im2[offset]); } } template<class T, int C> void Transform(Array<T, C>& im1, const Array<T, C>& im2, const Array<T, C>& im3, const Array<T, C>& im4, const std::function<void(T&, const T&, const T&, const T&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Array dimensions do not match. " << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1[offset], im2[offset], im3[offset], im4[offset]); } } template<class T, int C> void Transform(Array<T, C>& im1, const Array<T, C>& im2, const Array<T, C>& im3, const std::function<void(T&, const T&, const T&)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Array dimensions do not match. 
" << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (int offset = 0; offset < (int) sz; offset++) { func(im1[offset], im2[offset], im3[offset]); } } template<class T, int C> void Transform(Array<T, C>& im1, Array<T, C>& im2, const std::function<void(size_t offset, T& val1, T& val2)>& func) { if (im1.size() != im2.size()) throw std::runtime_error( MakeString() << "Array dimensions do not match. " << im1.size() << "!=" << im2.size()); size_t sz = im1.size(); #pragma omp parallel for for (size_t offset = 0; offset < sz; offset++) { func(offset, im1[offset], im2[offset]); } } template<class T, class L, class R, int C> std::basic_ostream<L, R> & operator <<( std::basic_ostream<L, R> & ss, const Array<T, C> & A) { size_t index = 0; for (const T& val : A) { ss << std::setw(5) << index++ << ": " << val << std::endl; } return ss; } template<class T, int C> Array<T, C> operator+(const T& scalar, const Array<T, C>& img) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = scalar + val2;}; Transform(out, img, f); return out; } template<class T, int C> void ScaleAdd(Array<T, C>& out, const T& scalar, const Array<T, C>& in) { out.resize(in.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 += scalar * val2;}; Transform(out, in, f); } template<class T, int C> void ScaleAdd(Array<T, C>& out, const Array<T, C>& in1, const T& scalar, const Array<T, C>& in2) { out.resize(in1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2+scalar * val3;}; Transform(out, in1, in2, f); } template<class T, int C> void ScaleAdd(Array<T, C>& out, const Array<T, C>& in1, const T& scalar2, const Array<T, C>& in2, const T& scalar3, const Array<T, C>& in3) { out.resize(in1.size()); std::function<void(T&, const T&, const T&, const T&)> f = [=](T& out, const T& val1, const T& val2, const T& val3) { out = 
val1+scalar2*val2+scalar3 * val3;}; Transform(out, in1, in2, in3, f); } template<class T, int C> void ScaleSubtract(Array<T, C>& out, const T& scalar, const Array<T, C>& in) { out.resize(in.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 -= scalar * val2;}; Transform(out, in, f); } template<class T, int C> void ScaleSubtract(Array<T, C>& out, const Array<T, C>& in1, const T& scalar, const Array<T, C>& in2) { out.resize(in1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2 - scalar * val3;}; Transform(out, in1, in2, f); } template<class T, int C> void Subtract(Array<T, C>& out, const Array<T, C>& v1, const Array<T, C>& v2) { out.resize(v1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2-val3;}; Transform(out, v1, v2, f); } template<class T, int C> void Add(Array<T, C>& out, const Array<T, C>& v1, const Array<T, C>& v2) { out.resize(v1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2 + val3;}; Transform(out, v1, v2, f); } template<class T, int C> Array<T, C> operator-(const T& scalar, const Array<T, C>& img) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = scalar - val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator*(const T& scalar, const Array<T, C>& img) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = scalar*val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator/(const T& scalar, const Array<T, C>& img) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = scalar / val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator+(const Array<T, C>& img, const T& scalar) { Array<T, 
C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = val2 + scalar;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator-(const Array<T, C>& img, const T& scalar) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = val2 - scalar;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator*(const Array<T, C>& img, const T& scalar) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = val2*scalar;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator/(const Array<T, C>& img, const T& scalar) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = val2 / scalar;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator-(const Array<T, C>& img) { Array<T, C> out(img.size()); std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 = -val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator+=(Array<T, C>& out, const Array<T, C>& img) { std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 += val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator-=(Array<T, C>& out, const Array<T, C>& img) { std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 -= val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator*=(Array<T, C>& out, const Array<T, C>& img) { std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 *= val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, C> operator/=(Array<T, C>& out, const Array<T, C>& img) { std::function<void(T&, const T&)> f = [=](T& val1, const T& val2) {val1 /= val2;}; Transform(out, img, f); return out; } template<class T, int C> Array<T, 
C> operator+=(Array<T, C>& out, const T& scalar) { std::function<void(T&)> f = [=](T& val1) {val1 += scalar;}; Transform(out, f); return out; } template<class T, int C> Array<T, C> operator-=(Array<T, C>& out, const T& scalar) { std::function<void(T&)> f = [=](T& val1) {val1 -= scalar;}; Transform(out, f); return out; } template<class T, int C> Array<T, C> operator*=(Array<T, C>& out, const T& scalar) { std::function<void(T&)> f = [=](T& val1) {val1 *= scalar;}; Transform(out, f); return out; } template<class T, int C> Array<T, C> operator/=(Array<T, C>& out, const T& scalar) { std::function<void(T&)> f = [=](T& val1) {val1 /= scalar;}; Transform(out, f); return out; } template<class T, int C> Array<T, C> operator+(const Array<T, C>& img1, const Array<T, C>& img2) { Array<T, C> out(img1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2 + val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> Array<T, C> operator-(const Array<T, C>& img1, const Array<T, C>& img2) { Array<T, C> out(img1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2 - val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> Array<T, C> operator*(const Array<T, C>& img1, const Array<T, C>& img2) { Array<T, C> out(img1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2*val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> Array<T, C> operator/(const Array<T, C>& img1, const Array<T, C>& img2) { Array<T, C> out(img1.size()); std::function<void(T&, const T&, const T&)> f = [=](T& val1, const T& val2, const T& val3) {val1 = val2 / val3;}; Transform(out, img1, img2, f); return out; } template<class T, int C> double dot(const Array<T, C>& a, const Array<T, C>& b) { double ans = 0.0; if (a.size() != b.size()) throw std::runtime_error( MakeString() << 
"Array dimensions do not match. " << a.size() << "!=" << b.size()); size_t sz = a.size(); #pragma omp parallel for reduction(+:ans) for (int i = 0; i < (int) sz; i++) { ans += a[i]*b[i]; } return ans; } template<class T, int C> T lengthSqr(const Array<T, C>& a) { T ans(0); size_t sz = a.size(); #pragma omp parallel for reduction(+:ans) for (int i = 0; i < (int) sz; i++) { ans += a[i]* a[i]; } return ans; } template<class T, int C> T distanceSqr(const Array<T, C>& a, const Array<T, C>& b) { T ans(0); size_t sz = a.size(); #pragma omp parallel for reduction(+:ans) for (int i = 0; i < (int)sz; i++) { ans += (a[i] - b[i])*(a[i] - b[i]); } return ans; } template<class T, int C> T distanceL1(const Array<T, C>& a, const Array<T, C>& b) { T ans(0); size_t sz = a.size(); #pragma omp parallel for reduction(+:ans) for (int i = 0; i < (int)sz; i++) { ans += std::abs(a[i] - b[i]); } return ans; } template<class T, int C> T distance(const Array<T, C>& a, const Array<T, C>& b) { return std::sqrt(distanceSqr(a,b)); } template<class T, int C> T max(const Array<T, C>& a) { size_t sz = a.size(); T tmp(std::numeric_limits<T>::min()); //#pragma omp parallel for reduction(max:tmp) for (int i = 0; i < (int) sz; i++) { if (a[i] > tmp) tmp = a[i]; } return tmp; } template<class T, int C> T min(const Array<T, C>& a) { size_t sz = a.size(); T tmp(std::numeric_limits<T>::max()); //#pragma omp parallel for reduction(min:tmp) for (int i = 0; i < (int) sz; i++) { if (a[i] < tmp) tmp = a[i]; } return tmp; } template<class T, int C> T length(const Array<T, C>& a) { return std::sqrt(lengthSqr(a)); } } ; #endif /* INCLUDE_ALLOYARRAY_H_ */
user_defined_move_generator.h
/*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_USER_DEFINED_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_USER_DEFINED_MOVE_GENERATOR_H__

#include "abstract_move_generator.h"

namespace printemps {
namespace neighborhood {
/*****************************************************************************/
// Move generator whose candidate moves are produced by a user-supplied
// callback (set via set_move_updater()) instead of a built-in neighborhood
// structure. setup() wraps the callback with the common filtering logic
// (fixed variables, selection variables, bound violations, improvability).
template <class T_Variable, class T_Expression>
class UserDefinedMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
    // User callback that fills/updates the candidate move list in place.
    // Defaults to a no-op (see initialize()).
    std::function<void(std::vector<Move<T_Variable, T_Expression>> *)>
        m_move_updater_wrapper;

   public:
    /*************************************************************************/
    UserDefinedMoveGenerator(void) {
        this->initialize();
    }

    /*************************************************************************/
    virtual ~UserDefinedMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    // Resets the user callback to a no-op.
    inline constexpr void initialize(void) {
        this->m_move_updater_wrapper =
            [](std::vector<Move<T_Variable, T_Expression>> *) {};
    }

    /*************************************************************************/
    // Registers the user callback that generates/updates candidate moves.
    inline constexpr void set_move_updater(
        const std::function<void(std::vector<Move<T_Variable, T_Expression>> *)>
            &a_MOVE_UPDATER) {
        this->m_move_updater_wrapper = a_MOVE_UPDATER;
    }

    /*************************************************************************/
    // Builds the m_move_updater used by the solver: first invokes the user
    // callback to (re)populate a_moves_ptr, then classifies every move.
    // (*a_flags)[i] == 1 keeps move i as a candidate; 0 rejects it.
    void setup(void) {
        auto move_updater =                                     //
            [this](auto *     a_moves_ptr,                      //
                   auto *     a_flags,                          //
                   const bool a_ACCEPT_ALL,                     //
                   const bool a_ACCEPT_OBJECTIVE_IMPROVABLE,    //
                   const bool a_ACCEPT_FEASIBILITY_IMPROVABLE,  //
                   [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
                m_move_updater_wrapper(a_moves_ptr);
                const int MOVES_SIZE = a_moves_ptr->size();
                a_flags->resize(MOVES_SIZE);
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < MOVES_SIZE; i++) {
                    (*a_moves_ptr)[i].sense = MoveSense::UserDefined;
                    (*a_flags)[i] = 1;
                    // Reject moves that touch fixed variables.
                    if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    // Reject moves that touch selection variables.
                    if (neighborhood::has_selection_variable(
                            (*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    // Reject moves that would violate variable bounds.
                    if (neighborhood::has_bound_violation((*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (a_ACCEPT_ALL) {
                        /** nothing to do */
                    } else {
                        // Otherwise keep the move only if it can improve the
                        // objective or feasibility (as requested).
                        if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                            neighborhood::has_objective_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }
                        if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                            neighborhood::has_feasibility_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }
                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
}  // namespace neighborhood
}  // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/
GB_binop__times_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_fc32) // A.*B function (eWiseMult): GB (_AemultB_01__times_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__times_fc32) // A.*B function (eWiseMult): GB (_AemultB_03__times_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc32) // A*D function (colscale): GB (_AxD__times_fc32) // D*A function (rowscale): GB (_DxB__times_fc32) // C+=B function (dense accum): GB (_Cdense_accumB__times_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__times_fc32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc32) // C=scalar+B GB (_bind1st__times_fc32) // C=scalar+B' GB (_bind1st_tran__times_fc32) // C=A+scalar GB (_bind2nd__times_fc32) // C=A'+scalar GB (_bind2nd_tran__times_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_mul (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define 
GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC32_mul (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_FC32 || GxB_NO_TIMES_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__times_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_fc32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_fc32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__times_fc32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__times_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC32_mul (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC32_mul (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_mul (x, aij) ; \ } GrB_Info GB (_bind1st_tran__times_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_mul (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__times_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
openmp-ex33.c
/* Although OpenMP emphasizes data parallelism, there are also constructs for
 * instruction parallelism */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    /* Slots written by the client sections and polled by the server section.
     * -1 means "not checked in yet"; a thread id (>= 0) means checked in.
     * NOTE: the server spins until BOTH clients check in, so with a single
     * thread (sections execute in order) the program would hang; run with
     * OMP_NUM_THREADS >= 2. */
    int clients[2] = {-1, -1};

#pragma omp parallel shared(clients)
    {
        int id = omp_get_thread_num();
#pragma omp sections
        {
#pragma omp section
            {
                int found[2] = {0, 0};
                printf("I am %d and I am the server.\n", id);
                while (1) {
                    /* BUG FIX: the original read clients[i] directly in a
                     * busy-wait loop -- a data race, and without a flush the
                     * compiler may keep the stale -1 in a register and spin
                     * forever.  The flush makes the clients' writes visible;
                     * the atomic read avoids a torn access. */
#pragma omp flush
                    int i;
                    for (i = 0; i < 2; i++) {
                        int c;
#pragma omp atomic read
                        c = clients[i];
                        if (!found[i] && c >= 0) {
                            found[i] = 1;
                            printf("Thread %d has checked in as client %d\n",
                                   c, i);
                        }
                    }
                    if (found[0] && found[1])
                        break;
                }
            }
#pragma omp section
            {
                printf("I am %d and I am client 0.\n", id);
#pragma omp atomic write
                clients[0] = id;
                /* Publish the write so the server's flush observes it. */
#pragma omp flush
            }
#pragma omp section
            {
                printf("I am %d and I am client 1.\n", id);
#pragma omp atomic write
                clients[1] = id;
#pragma omp flush
            }
        }
    }
    return 0;
}
GB_subassign_zombie.c
//------------------------------------------------------------------------------
// GB_subassign_zombie: C(I,J)<!,repl> = empty ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 00: C(I,J)<!,repl> = empty ; using S

// M:           NULL
// Mask_comp:   true
// C_replace:   true
// accum:       any (present or not; result is the same)
// A:           any (scalar or matrix; result is the same)
// S:           constructed

// C: not bitmap

// C->iso is not affected.

#include "GB_subassign_methods.h"
// On any early exit (GB_OK failure) free the symbolic workspace matrix S.
#undef  GB_FREE_ALL
#define GB_FREE_ALL GB_phbix_free (S) ;

// Deletes every entry in C(I,J) by marking it as a zombie (a flipped row
// index); the complemented empty mask with C_replace means the whole
// submatrix is cleared regardless of A, accum, or the scalar.
GrB_Info GB_subassign_zombie
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // bitmap/full matrices cannot hold zombies, so they are excluded here.
    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GrB_Info info ;
    struct GB_Matrix_opaque S_header ;
    GrB_Matrix S = GB_clear_static_header (&S_header) ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, false, Context)) ;
    ASSERT (GB_JUMBLED_OK (S)) ;    // S can be returned as jumbled

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    // Sx [pS] holds, for each entry of S, the position pC of the
    // corresponding entry back in C.
    const int64_t *restrict Sx = (int64_t *) S->x ;
    int64_t *restrict Ci = C->i ;

    //--------------------------------------------------------------------------
    // Method 00: C(I,J)<!,repl> = empty ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal, O(nnz(S)), assuming S has already been constructed.

    //--------------------------------------------------------------------------
    // Parallel: all entries in S can be processed entirely in parallel.
    //--------------------------------------------------------------------------

    // All entries in C(I,J) are deleted.  The result does not depend on A or
    // the scalar.

    int64_t snz = GB_nnz (S) ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (snz, chunk, nthreads_max) ;
    int64_t nzombies = C->nzombies ;
    int64_t pS ;
    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:nzombies)
    for (pS = 0 ; pS < snz ; pS++)
    {
        // S (inew,jnew) is a pointer back into C (I(inew), J(jnew))
        int64_t pC = Sx [pS] ;
        int64_t i = Ci [pC] ;
        // ----[X A 0] or [X . 0]-----------------------------------------------
        // action: ( X ): still a zombie
        // ----[C A 0] or [C . 0]-----------------------------------------------
        // action: C_repl: ( delete ): becomes a zombie
        if (!GB_IS_ZOMBIE (i))
        {
            // flip the row index to mark the entry as deleted (a zombie)
            nzombies++ ;
            Ci [pC] = GB_FLIP (i) ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    C->nzombies = nzombies ;
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
}
elastic_kernel_3d_so8.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generic data carrier for a multi-dimensional array plus its size metadata
   (Devito-style generated code; `size` holds the padded extents used below). */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Wall-clock accumulator for the single timed section of this kernel. */
struct profiler
{
  double section0;
};

/* 3-D elastic wave propagation kernel, 8th-order staggered-grid finite
   differences (so8), with time-tiling ("wavefront"/skewed blocking) and
   OpenMP parallel loop blocking.  Each time step first updates the three
   velocity components from the stress tensor, then updates the six stress
   components from the velocities, then injects sources through a sparse
   source mask.  NOTE(review): the FD coefficients appear to be premultiplied
   by the time step and material parameters by the code generator — confirm
   against the generating Devito script before editing them. */
int Kernel(struct dataobj *restrict block_sizes_vec, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_fxx_vec, struct dataobj *restrict save_src_fyy_vec, struct dataobj *restrict save_src_fzz_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict tau_sol_xx_vec, struct dataobj *restrict tau_sol_xy_vec, struct dataobj *restrict tau_sol_xz_vec, struct dataobj *restrict tau_sol_yy_vec, struct dataobj *restrict tau_sol_yz_vec, struct dataobj *restrict tau_sol_zz_vec, struct dataobj *restrict v_sol_x_vec, struct dataobj *restrict v_sol_y_vec, struct dataobj *restrict v_sol_z_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
  /* Re-type the opaque dataobj buffers as variably-modified array pointers so
     that multi-dimensional indexing below compiles to direct address math. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_fxx)[save_src_fxx_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fxx_vec->size[1]])save_src_fxx_vec->data;
  float(*restrict save_src_fyy)[save_src_fyy_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fyy_vec->size[1]])save_src_fyy_vec->data;
  float(*restrict save_src_fzz)[save_src_fzz_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_fzz_vec->size[1]])save_src_fzz_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict tau_sol_xx)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xx_vec->size[1]][tau_sol_xx_vec->size[2]][tau_sol_xx_vec->size[3]])tau_sol_xx_vec->data;
  float(*restrict tau_sol_xy)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xy_vec->size[1]][tau_sol_xy_vec->size[2]][tau_sol_xy_vec->size[3]])tau_sol_xy_vec->data;
  float(*restrict tau_sol_xz)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_xz_vec->size[1]][tau_sol_xz_vec->size[2]][tau_sol_xz_vec->size[3]])tau_sol_xz_vec->data;
  float(*restrict tau_sol_yy)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yy_vec->size[1]][tau_sol_yy_vec->size[2]][tau_sol_yy_vec->size[3]])tau_sol_yy_vec->data;
  float(*restrict tau_sol_yz)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_yz_vec->size[1]][tau_sol_yz_vec->size[2]][tau_sol_yz_vec->size[3]])tau_sol_yz_vec->data;
  float(*restrict tau_sol_zz)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]] __attribute__((aligned(64))) = (float(*)[tau_sol_zz_vec->size[1]][tau_sol_zz_vec->size[2]][tau_sol_zz_vec->size[3]])tau_sol_zz_vec->data;
  float(*restrict v_sol_x)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_x_vec->size[1]][v_sol_x_vec->size[2]][v_sol_x_vec->size[3]])v_sol_x_vec->data;
  float(*restrict v_sol_y)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_y_vec->size[1]][v_sol_y_vec->size[2]][v_sol_y_vec->size[3]])v_sol_y_vec->data;
  float(*restrict v_sol_z)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_sol_z_vec->size[1]][v_sol_z_vec->size[2]][v_sol_z_vec->size[3]])v_sol_z_vec->data;
  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  /* Runtime-tunable tile (xb/yb) and inner block (x0_blk0/y0_blk0) sizes. */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];
  /* sf is the time-skew factor; loop indices below are skewed by `time`,
     hence the recurring `x - time + 8` style offsets (8 = halo width). */
  int sf = 8;
  int t_blk_size = 2 * sf * (time_M - time_m);
  //int xb_size = 64;
  //int yb_size = 64;
  //x0_blk0_size = 8;
  //y0_blk0_size = 8;
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    /* Tile loops over skewed x/y; upper bounds are extended by the total
       skew so tiles cover the shifted iteration space of every time step. */
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
    {
      //printf(" Change of outer xblock %d \n", xb);
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
      {
        /* t0/t1 select the read/write time buffers (double buffering);
           tw is the logical time-step index within [0, time_M - time_m]. */
        for (int time = t_blk, t0 = (time) % (2), t1 = (time + 1) % (2); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (2), t1 = (((time / sf) % (time_M - time_m + 1))) % (2))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));
          /* --- Pass 1: velocity update, v(t1) <- v(t0) + div(tau(t0)) --- */
          #pragma omp parallel num_threads(nthreads)
          {
            //printf(" Change of time block : %d \n", tw);
            #pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                //printf(" Change of inner xblock %d \n", x0_blk0);
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    //printf(" Updating velocity x %d \n", x - time + 4);
                    //printf(" \n PDE update : \n");
                    #pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      //printf(" Updating velocity x %d z: %d \n", x - time + 4, z + 4);
                      /* reciprocal grid spacings */
                      float r26 = 1.0 / h_z;
                      float r25 = 1.0 / h_y;
                      float r24 = 1.0 / h_x;
                      /* v_x += d(tau_xx)/dx + d(tau_xy)/dy + d(tau_xz)/dz
                         (8th-order staggered FD weights, scaled by generator) */
                      v_sol_x[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xx[t0][x - time + 5][y - time + 8][z + 8] - tau_sol_xx[t0][x - time + 12][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xx[t0][x - time + 6][y - time + 8][z + 8] + tau_sol_xx[t0][x - time + 11][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xx[t0][x - time + 7][y - time + 8][z + 8] - tau_sol_xx[t0][x - time + 10][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xx[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_xx[t0][x - time + 9][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_xy[t0][x - time + 8][y - time + 4][z + 8] - tau_sol_xy[t0][x - time + 8][y - time + 11][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xy[t0][x - time + 8][y - time + 5][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 10][z + 8]) + 5.22163029879319e-2F * (tau_sol_xy[t0][x - time + 8][y - time + 6][z + 8] - tau_sol_xy[t0][x - time + 8][y - time + 9][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xy[t0][x - time + 8][y - time + 7][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_xz[t0][x - time + 8][y - time + 8][z + 4] - tau_sol_xz[t0][x - time + 8][y - time + 8][z + 11]) + 6.2659563586471e-3F * (-tau_sol_xz[t0][x - time + 8][y - time + 8][z + 5] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 10]) + 5.22163029879319e-2F * (tau_sol_xz[t0][x - time + 8][y - time + 8][z + 6] - tau_sol_xz[t0][x - time + 8][y - time + 8][z + 9]) + 7.8324454477134e-1F * (-tau_sol_xz[t0][x - time + 8][y - time + 8][z + 7] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8])) + v_sol_x[t0][x - time + 8][y - time + 8][z + 8];
                      /* v_y += d(tau_xy)/dx + d(tau_yy)/dy + d(tau_yz)/dz */
                      v_sol_y[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xy[t0][x - time + 4][y - time + 8][z + 8] - tau_sol_xy[t0][x - time + 11][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xy[t0][x - time + 5][y - time + 8][z + 8] + tau_sol_xy[t0][x - time + 10][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xy[t0][x - time + 6][y - time + 8][z + 8] - tau_sol_xy[t0][x - time + 9][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xy[t0][x - time + 7][y - time + 8][z + 8] + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_yy[t0][x - time + 8][y - time + 5][z + 8] - tau_sol_yy[t0][x - time + 8][y - time + 12][z + 8]) + 6.2659563586471e-3F * (-tau_sol_yy[t0][x - time + 8][y - time + 6][z + 8] + tau_sol_yy[t0][x - time + 8][y - time + 11][z + 8]) + 5.22163029879319e-2F * (tau_sol_yy[t0][x - time + 8][y - time + 7][z + 8] - tau_sol_yy[t0][x - time + 8][y - time + 10][z + 8]) + 7.8324454477134e-1F * (-tau_sol_yy[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_yy[t0][x - time + 8][y - time + 9][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_yz[t0][x - time + 8][y - time + 8][z + 4] - tau_sol_yz[t0][x - time + 8][y - time + 8][z + 11]) + 6.2659563586471e-3F * (-tau_sol_yz[t0][x - time + 8][y - time + 8][z + 5] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 10]) + 5.22163029879319e-2F * (tau_sol_yz[t0][x - time + 8][y - time + 8][z + 6] - tau_sol_yz[t0][x - time + 8][y - time + 8][z + 9]) + 7.8324454477134e-1F * (-tau_sol_yz[t0][x - time + 8][y - time + 8][z + 7] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8])) + v_sol_y[t0][x - time + 8][y - time + 8][z + 8];
                      /* v_z += d(tau_xz)/dx + d(tau_yz)/dy + d(tau_zz)/dz */
                      v_sol_z[t1][x - time + 8][y - time + 8][z + 8] = r24 * (4.56702358488521e-4F * (tau_sol_xz[t0][x - time + 4][y - time + 8][z + 8] - tau_sol_xz[t0][x - time + 11][y - time + 8][z + 8]) + 6.2659563586471e-3F * (-tau_sol_xz[t0][x - time + 5][y - time + 8][z + 8] + tau_sol_xz[t0][x - time + 10][y - time + 8][z + 8]) + 5.22163029879319e-2F * (tau_sol_xz[t0][x - time + 6][y - time + 8][z + 8] - tau_sol_xz[t0][x - time + 9][y - time + 8][z + 8]) + 7.8324454477134e-1F * (-tau_sol_xz[t0][x - time + 7][y - time + 8][z + 8] + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8])) + r25 * (4.56702358488521e-4F * (tau_sol_yz[t0][x - time + 8][y - time + 4][z + 8] - tau_sol_yz[t0][x - time + 8][y - time + 11][z + 8]) + 6.2659563586471e-3F * (-tau_sol_yz[t0][x - time + 8][y - time + 5][z + 8] + tau_sol_yz[t0][x - time + 8][y - time + 10][z + 8]) + 5.22163029879319e-2F * (tau_sol_yz[t0][x - time + 8][y - time + 6][z + 8] - tau_sol_yz[t0][x - time + 8][y - time + 9][z + 8]) + 7.8324454477134e-1F * (-tau_sol_yz[t0][x - time + 8][y - time + 7][z + 8] + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8])) + r26 * (4.56702358488521e-4F * (tau_sol_zz[t0][x - time + 8][y - time + 8][z + 5] - tau_sol_zz[t0][x - time + 8][y - time + 8][z + 12]) + 6.2659563586471e-3F * (-tau_sol_zz[t0][x - time + 8][y - time + 8][z + 6] + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 11]) + 5.22163029879319e-2F * (tau_sol_zz[t0][x - time + 8][y - time + 8][z + 7] - tau_sol_zz[t0][x - time + 8][y - time + 8][z + 10]) + 7.8324454477134e-1F * (-tau_sol_zz[t0][x - time + 8][y - time + 8][z + 8] + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 9])) + v_sol_z[t0][x - time + 8][y - time + 8][z + 8];
                    }
                  }
                }
              }
            }
          }
          /* --- Pass 2: stress update tau(t1) from v(t1), plus source
             injection.  Blocks are shifted by -4 (half the FD stencil) so
             this pass reads only velocities already written by pass 1. --- */
          #pragma omp parallel num_threads(nthreads)
          {
            #pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb - 4); x0_blk0 <= +min((x_M + time), (xb - 4 + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb - 4); y0_blk0 <= +min((y_M + time), (yb - 4 + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb - 4 + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb - 4 + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    //printf(" Updating stress x %d \n", x - time + 4);
                    #pragma omp simd aligned(tau_sol_xx, tau_sol_xz, tau_sol_zz, v_sol_x, v_sol_z : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      //printf(" Updating x %d z: %d \n", x - time + 4, z + 4);
                      /* common sub-expressions: velocity differences for the
                         8th-order staggered derivatives of v_x, v_y, v_z */
                      float r47 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 8];
                      float r46 = -v_sol_y[t1][x - time + 8][y - time + 8][z + 8];
                      float r45 = -v_sol_x[t1][x - time + 8][y - time + 8][z + 8];
                      float r44 = -v_sol_y[t1][x - time + 8][y - time + 7][z + 8] + v_sol_y[t1][x - time + 8][y - time + 8][z + 8];
                      float r43 = v_sol_y[t1][x - time + 8][y - time + 6][z + 8] - v_sol_y[t1][x - time + 8][y - time + 9][z + 8];
                      float r42 = -v_sol_y[t1][x - time + 8][y - time + 5][z + 8] + v_sol_y[t1][x - time + 8][y - time + 10][z + 8];
                      float r41 = v_sol_y[t1][x - time + 8][y - time + 4][z + 8] - v_sol_y[t1][x - time + 8][y - time + 11][z + 8];
                      float r40 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 7] + v_sol_z[t1][x - time + 8][y - time + 8][z + 8];
                      float r39 = v_sol_z[t1][x - time + 8][y - time + 8][z + 6] - v_sol_z[t1][x - time + 8][y - time + 8][z + 9];
                      float r38 = -v_sol_z[t1][x - time + 8][y - time + 8][z + 5] + v_sol_z[t1][x - time + 8][y - time + 8][z + 10];
                      float r37 = v_sol_z[t1][x - time + 8][y - time + 8][z + 4] - v_sol_z[t1][x - time + 8][y - time + 8][z + 11];
                      float r36 = -v_sol_x[t1][x - time + 7][y - time + 8][z + 8] + v_sol_x[t1][x - time + 8][y - time + 8][z + 8];
                      float r35 = v_sol_x[t1][x - time + 6][y - time + 8][z + 8] - v_sol_x[t1][x - time + 9][y - time + 8][z + 8];
                      float r34 = -v_sol_x[t1][x - time + 5][y - time + 8][z + 8] + v_sol_x[t1][x - time + 10][y - time + 8][z + 8];
                      float r33 = v_sol_x[t1][x - time + 4][y - time + 8][z + 8] - v_sol_x[t1][x - time + 11][y - time + 8][z + 8];
                      /* reciprocal grid spacings */
                      float r32 = 1.0 / h_y;
                      float r31 = 1.0 / h_z;
                      float r30 = 1.0 / h_x;
                      /* r29/r28/r27: lambda-weighted dvx/dx, dvz/dz, dvy/dy
                         (presumably; coefficients baked in by the generator) */
                      float r29 = r30 * (2.95943128300561e-3F * r33 + 4.06033972040332e-2F * r34 + 3.38361643361799e-1F * r35 + 5.07542465011829F * r36);
                      float r28 = r31 * (2.95943128300561e-3F * r37 + 4.06033972040332e-2F * r38 + 3.38361643361799e-1F * r39 + 5.07542465011829F * r40);
                      float r27 = r32 * (2.95943128300561e-3F * r41 + 4.06033972040332e-2F * r42 + 3.38361643361799e-1F * r43 + 5.07542465011829F * r44);
                      /* diagonal stress: lambda*(div v) + 2*mu*(own strain) */
                      tau_sol_xx[t1][x - time + 8][y - time + 8][z + 8] = r27 + r28 + r30 * (5.91886256601123e-3F * r33 + 8.12067944080664e-2F * r34 + 6.76723286723597e-1F * r35 + 1.01508493002366e+1F * r36) + tau_sol_xx[t0][x - time + 8][y - time + 8][z + 8];
                      /* shear stresses: mu*(cross derivatives of velocity) */
                      tau_sol_xy[t1][x - time + 8][y - time + 8][z + 8] = r30 * (2.53771232505914F * (r46 + v_sol_y[t1][x - time + 9][y - time + 8][z + 8]) + 1.47971564150281e-3F * (v_sol_y[t1][x - time + 5][y - time + 8][z + 8] - v_sol_y[t1][x - time + 12][y - time + 8][z + 8]) + 2.03016986020166e-2F * (-v_sol_y[t1][x - time + 6][y - time + 8][z + 8] + v_sol_y[t1][x - time + 11][y - time + 8][z + 8]) + 1.69180821680899e-1F * (v_sol_y[t1][x - time + 7][y - time + 8][z + 8] - v_sol_y[t1][x - time + 10][y - time + 8][z + 8])) + r32 * (2.53771232505914F * (r45 + v_sol_x[t1][x - time + 8][y - time + 9][z + 8]) + 1.47971564150281e-3F * (v_sol_x[t1][x - time + 8][y - time + 5][z + 8] - v_sol_x[t1][x - time + 8][y - time + 12][z + 8]) + 2.03016986020166e-2F * (-v_sol_x[t1][x - time + 8][y - time + 6][z + 8] + v_sol_x[t1][x - time + 8][y - time + 11][z + 8]) + 1.69180821680899e-1F * (v_sol_x[t1][x - time + 8][y - time + 7][z + 8] - v_sol_x[t1][x - time + 8][y - time + 10][z + 8])) + tau_sol_xy[t0][x - time + 8][y - time + 8][z + 8];
                      tau_sol_xz[t1][x - time + 8][y - time + 8][z + 8] = r30 * (2.53771232505914F * (r47 + v_sol_z[t1][x - time + 9][y - time + 8][z + 8]) + 1.47971564150281e-3F * (v_sol_z[t1][x - time + 5][y - time + 8][z + 8] - v_sol_z[t1][x - time + 12][y - time + 8][z + 8]) + 2.03016986020166e-2F * (-v_sol_z[t1][x - time + 6][y - time + 8][z + 8] + v_sol_z[t1][x - time + 11][y - time + 8][z + 8]) + 1.69180821680899e-1F * (v_sol_z[t1][x - time + 7][y - time + 8][z + 8] - v_sol_z[t1][x - time + 10][y - time + 8][z + 8])) + r31 * (2.53771232505914F * (r45 + v_sol_x[t1][x - time + 8][y - time + 8][z + 9]) + 1.47971564150281e-3F * (v_sol_x[t1][x - time + 8][y - time + 8][z + 5] - v_sol_x[t1][x - time + 8][y - time + 8][z + 12]) + 2.03016986020166e-2F * (-v_sol_x[t1][x - time + 8][y - time + 8][z + 6] + v_sol_x[t1][x - time + 8][y - time + 8][z + 11]) + 1.69180821680899e-1F * (v_sol_x[t1][x - time + 8][y - time + 8][z + 7] - v_sol_x[t1][x - time + 8][y - time + 8][z + 10])) + tau_sol_xz[t0][x - time + 8][y - time + 8][z + 8];
                      tau_sol_yy[t1][x - time + 8][y - time + 8][z + 8] = r28 + r29 + r32 * (5.91886256601123e-3F * r41 + 8.12067944080664e-2F * r42 + 6.76723286723597e-1F * r43 + 1.01508493002366e+1F * r44) + tau_sol_yy[t0][x - time + 8][y - time + 8][z + 8];
                      tau_sol_yz[t1][x - time + 8][y - time + 8][z + 8] = r31 * (2.53771232505914F * (r46 + v_sol_y[t1][x - time + 8][y - time + 8][z + 9]) + 1.47971564150281e-3F * (v_sol_y[t1][x - time + 8][y - time + 8][z + 5] - v_sol_y[t1][x - time + 8][y - time + 8][z + 12]) + 2.03016986020166e-2F * (-v_sol_y[t1][x - time + 8][y - time + 8][z + 6] + v_sol_y[t1][x - time + 8][y - time + 8][z + 11]) + 1.69180821680899e-1F * (v_sol_y[t1][x - time + 8][y - time + 8][z + 7] - v_sol_y[t1][x - time + 8][y - time + 8][z + 10])) + r32 * (2.53771232505914F * (r47 + v_sol_z[t1][x - time + 8][y - time + 9][z + 8]) + 1.47971564150281e-3F * (v_sol_z[t1][x - time + 8][y - time + 5][z + 8] - v_sol_z[t1][x - time + 8][y - time + 12][z + 8]) + 2.03016986020166e-2F * (-v_sol_z[t1][x - time + 8][y - time + 6][z + 8] + v_sol_z[t1][x - time + 8][y - time + 11][z + 8]) + 1.69180821680899e-1F * (v_sol_z[t1][x - time + 8][y - time + 7][z + 8] - v_sol_z[t1][x - time + 8][y - time + 10][z + 8])) + tau_sol_yz[t0][x - time + 8][y - time + 8][z + 8];
                      tau_sol_zz[t1][x - time + 8][y - time + 8][z + 8] = r27 + r29 + r31 * (5.91886256601123e-3F * r37 + 8.12067944080664e-2F * r38 + 6.76723286723597e-1F * r39 + 1.01508493002366e+1F * r40) + tau_sol_zz[t0][x - time + 8][y - time + 8][z + 8];
                    }
                    /* Source injection: nnz_sp_source_mask gives the number of
                       active z positions at this (x,y); sp_source_mask lists
                       them, source_id maps to the per-source time trace. */
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      //printf("\n Source_injection at : ");
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      float r0 = save_src_fxx[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      float r1 = save_src_fyy[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      float r2 = save_src_fzz[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      tau_sol_xx[t1][x - time + 8][y - time + 8][zind + 8] += r0;
                      tau_sol_yy[t1][x - time + 8][y - time + 8][zind + 8] += r1;
                      tau_sol_zz[t1][x - time + 8][y - time + 8][zind + 8] += r2;
                      //printf(" Time %d , at : %d, %d \n", tw, x - time + 4, zind + 4);
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  return 0;
}
/* Backdoor edit at Tue Jul 28 12:03:11 2020*/
/* Backdoor edit at Tue Jul 28 12:27:09 2020*/
/* Backdoor edit at Tue Jul 28 12:28:10 2020*/
/* Backdoor edit at Tue Jul 28 15:17:20 2020*/
/* ===== file: taskloop-3.c ===== */
/* { dg-do run } */
/* { dg-options "-O2 -fopenmp -std=c99" } */

/* GCC OpenMP runtime test for the taskloop construct, exercising
   firstprivate/lastprivate data-sharing semantics (scalar, global,
   nogroup, collapse(2)).  Any deviation calls __builtin_abort.  */

int g;
int a[1024];

/* firstprivate(x) lastprivate(x): every task sees the initial x (74);
   the logically-last iteration's write (i == 63) defines the result.  */
__attribute__((noinline, noclone)) int
f1 (int x)
{
  #pragma omp taskloop firstprivate (x) lastprivate (x)
  for (int i = 0; i < 64; i++)
    {
      if (x != 74)
        __builtin_abort ();
      if (i == 63)
        x = i + 4;
    }
  return x;
}

/* Same as f1 but on a global, with nogroup: the caller must taskwait
   before observing g (see main).  */
__attribute__((noinline, noclone)) void
f2 (void)
{
  #pragma omp taskloop firstprivate (g) lastprivate (g) nogroup
  for (int i = 0; i < 64; i++)
    {
      if (g != 77)
        __builtin_abort ();
      if (i == 63)
        g = i + 9;
    }
}

/* lastprivate on the loop variable i and a plain scalar l: after the
   taskloop, i holds its final (past-the-end) value and l the last body
   assignment.  */
__attribute__((noinline, noclone)) long long
f3 (long long a, long long b, long long c)
{
  long long i;
  int l;
  #pragma omp taskloop default (none) lastprivate (i, l)
  for (i = a; i < b; i += c)
    l = i;
  return l * 7 + i;
}

/* collapse(2) with mixed firstprivate/lastprivate: k keeps its initial
   value in every task except the iteration (31,46) that rewrites it.  */
__attribute__((noinline, noclone)) long long
f4 (long long a, long long b, long long c, long long d, long long e, long long f, int k)
{
  long long i, j;
  int l;
  #pragma omp taskloop default (none) collapse(2) \
                       firstprivate (k) lastprivate (i, j, k, l)
  for (i = a; i < b; i += e)
    for (j = c; j < d; j += f)
      {
        if (k != 73)
          __builtin_abort ();
        if (i == 31 && j == 46)
          k = i;
        l = j;
      }
  return i + 5 * j + 11 * k + 17 * l;
}

int
main ()
{
  #pragma omp parallel
  #pragma omp single
    {
      if (f1 (74) != 63 + 4)
        __builtin_abort ();
      g = 77;
      f2 ();
      /* f2 used nogroup, so wait for its tasks before reading g.  */
      #pragma omp taskwait
      if (g != 63 + 9)
        __builtin_abort ();
      if (f3 (7, 12, 2) != 11 * 7 + 13)
        __builtin_abort ();
      if (f4 (0, 32, 16, 48, 1, 2, 73) != 32 + 5 * 48 + 11 * 31 + 17 * 46)
        __builtin_abort ();
    }
  return 0;
}
/* ===== file: GB_binop__rminus_fc32.c ===== */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_08__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_02__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_04__rminus_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rminus_fc32)
// A*D function (colscale):         GB (_AxD__rminus_fc32)
// D*A function (rowscale):         GB (_DxB__rminus_fc32)
// C+=B function (dense accum):     GB (_Cdense_accumB__rminus_fc32)
// C+=b function (dense accum):     GB (_Cdense_accumb__rminus_fc32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rminus_fc32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rminus_fc32)
// C=scalar+B                       GB (_bind1st__rminus_fc32)
// C=scalar+B'                      GB (_bind1st_tran__rminus_fc32)
// C=A+scalar                       GB (_bind2nd__rminus_fc32)
// C=A'+scalar                      GB (_bind2nd_tran__rminus_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// A pattern?  0
// B type:   GxB_FC32_t
// B pattern?  0

// BinaryOp: cij = GB_FC32_minus (bij, aij)

// The macros below are the "template parameters" consumed by the generic
// #include'd template files in the function bodies that follow.  They are
// emitted by the code generator for the RMINUS operator on single-precision
// complex (FC32) — note the reversed operand order: rminus(x,y) = y - x.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_minus (y, x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_FC32 || GxB_NO_RMINUS_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; body is the shared template
// specialized by the macros defined above.
void GB (_Cdense_ewise3_accum__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when the operator is compiled out (GB_DISABLE), so the
// caller falls back to the generic implementation.
GrB_Info GB (_Cdense_accumB__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return inside the braces above always
    // fires first; harmless generator artifact, kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, alpha/beta stand in for entries missing from A/B
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rminus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for rminus (the flip is absorbed by the operator
    // definition itself), so only the non-flipped branch is compiled here.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rminus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = rminus (x, Bx [p]) = Bx [p] - x for every entry present
// per the bitmap Bb (Bb == NULL means all entries present).
GrB_Info GB (_bind1st__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_minus (bij, x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = rminus (Ax [p], y) = y - Ax [p] for every entry present
// per the bitmap Ab.
GrB_Info GB (_bind2nd__rminus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_minus (y, aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_minus (aij, x) ;          \
}

GrB_Info GB (_bind1st_tran__rminus_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_minus (y, aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
c55c7aec73df0f31d67fbe39510946453b899e1d.c
/*
 * Machine-generated (Devito-style) forward modeling kernel for an acoustic
 * wave equation with a damping (sponge) layer, offloaded to an accelerator
 * via OpenMP target directives.  Section 0 advances the wavefield u with a
 * high-order finite-difference stencil; section 1 injects the source via
 * trilinear interpolation; section 2 samples the wavefield at the receiver
 * coordinates.  Do not edit by hand: every coefficient is generated.
 *
 * NOTE(review): the index offsets (+12) suggest a 12-point halo and the
 * 2.0e+1F / 5.0e-2 factors suggest a 20 m grid spacing — generated
 * constants, confirm against the generating script.
 */
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

/* Generated array descriptor: raw data plus padded/halo geometry. */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Per-section wall-clock accumulators (seconds). */
struct profiler
{
  double section0;
  double section1;
  double section2;
} ;

int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)
{
  /* Reinterpret the flat buffers as variably-dimensioned arrays. */
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
  float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
  float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
  float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Copy all inputs to the device once, before the time loop. */
#pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])

  /* t0/t1/t2 rotate through a 3-deep circular time buffer:
     t0 = current, t1 = next (being written), t2 = previous. */
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
  {
    struct timeval start_section0, end_section0;
    gettimeofday(&start_section0, NULL);
    /* Begin section0: finite-difference update of the wavefield. */
#pragma omp target teams distribute parallel for collapse(3)
    for (int x = x_m; x <= x_M; x += 1)
    {
      for (int y = y_m; y <= y_M; y += 1)
      {
        for (int z = z_m; z <= z_M; z += 1)
        {
          /* r0 = vp^2 at this cell (squared propagation velocity). */
          float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12];
          /* Generated high-order Laplacian plus damping term; coefficients
             come from the code generator — do not hand-tune. */
          u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1);
        }
      }
    }
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1: trilinear source injection.  ii_src_0..5 are the
       corner cell indices of the enclosing grid cell; px/py/pz the local
       offsets within it; each corner receives its interpolation weight
       times src[time], scaled by vp^2*dt^2. */
#pragma omp target teams distribute parallel for collapse(1)
    for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
    {
      int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0]));
      int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1]));
      int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2]));
      int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1;
      int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1;
      int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]);
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
      {
        float r1 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
      {
        float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r5;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6;
      }
      if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7;
      }
      if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8;
      }
    }
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;

    struct timeval start_section2, end_section2;
    gettimeofday(&start_section2, NULL);
    /* Begin section2: receiver sampling — the mirror of section1, gathering
       a trilinear interpolation of u[t0] at each receiver coordinate. */
#pragma omp target teams distribute parallel for collapse(1)
    for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
    {
      int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0]));
      int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1]));
      int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2]));
      int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1;
      int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1;
      int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
      float sum = 0.0F;
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      rec[time][p_rec] = sum;
    }
    /* End section2 */
    gettimeofday(&end_section2, NULL);
    timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
  }

  /* Copy results back and release device storage. */
#pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
  return 0;
}
/* NOTE(review): marker left by an external tool — suspicious provenance;
   verify this file matches the generator's output before trusting it. */
/* Backdoor edit at Wed Mar 4 04:57:45 2020*/
parallelLUFactorization-pipeline.c
// C program to decompose a matrix into lower and upper triangular
// factors (LU, Doolittle form) using a pipelined producer/consumer
// scheme across OpenMP threads, then verify the result by multiplying
// L*U back together and comparing with the input matrix.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <omp.h>

int n = 4;                      /* matrix order; overwritten from the input file */

/* One pipeline message: the pivot-row index being broadcast downstream. */
struct record_s {
    double val;                 /* payload: pivot row index (stored as double) */
    long prod;                  /* id of the producing thread */
    struct record_s* next;
};

/* Singly linked FIFO queue, one per pipeline stage. */
struct buf_list {
    struct record_s* head_p;
    struct record_s* tail_p;
};

struct buf_list buff[4];
int producers_done[4];          /* NOTE(review): bumped on every Put(); Get()
                                   treats >= 1 as "producer finished", so an
                                   empty queue yields 0. instead of blocking
                                   after the first message — confirm this is
                                   the intended protocol. */

/* Pop the head record of queue `thread`; NULL when empty.  The caller owns
   (and must free) the returned record.
   Bug fix: the original malloc'd a fresh record here and then either leaked
   it (empty queue) or overwrote the pointer with the head, leaking one
   record per call. */
struct record_s* Dequeue(long thread) {
    struct record_s* rec_p = buff[thread].head_p;
    if (rec_p == NULL)
        return NULL;
    if (buff[thread].head_p == buff[thread].tail_p)
        buff[thread].head_p = buff[thread].tail_p = NULL;
    else
        buff[thread].head_p = buff[thread].head_p->next;
    return rec_p;
}

/* Receive for stage `thread`: spin until a record arrives, returning its
   payload; returns 0. once the producer is marked done and the queue has
   drained.  NOTE(review): head_p is read outside the critical section —
   benign-looking busy-wait, but formally a data race. */
double Get(long thread) {
    struct record_s* rec_p;
    while (producers_done[thread] < 1 || buff[thread].head_p != NULL) {
        #pragma omp critical (queue)
        {
            rec_p = Dequeue(thread);
        }
        if (rec_p != NULL) {
            double data = rec_p->val;
            free(rec_p);
            return data;
        }
    }
    return 0.;
}

/* Allocate and initialize a queue record. */
struct record_s* Create_record(long thread, double data) {
    struct record_s* rec_p = malloc(sizeof *rec_p);
    rec_p->next = NULL;
    rec_p->prod = thread;
    rec_p->val = data;
    return rec_p;
}

/* Append a record to queue `thread`; caller must hold the queue lock. */
void Enqueue(long thread, struct record_s* rec_p) {
    if (buff[thread].tail_p == NULL)
        buff[thread].head_p = rec_p;
    else
        buff[thread].tail_p->next = rec_p;
    buff[thread].tail_p = rec_p;
}

/* Send `data` to stage `thread`. */
void Put(long thread, double data) {
    struct record_s* rec_p = Create_record(thread, data);
    #pragma omp critical(queue)
    {
        Enqueue(thread, rec_p);
    }
    #pragma omp critical(done)
    producers_done[thread]++;
}

/* Write an n-by-n matrix to fp, one row per line. */
void printNxNMatrix(double** matrix, int n, FILE *fp) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            fprintf(fp, "%lf ", matrix[i][j]);
        fprintf(fp, "\n");
    }
}

/* Verify lower*upper == matrix (within 1e-4).  On mismatch the product is
   dumped to fp and 0 is returned; otherwise returns 1. */
int multiply(int m1, int m2, double** lower, int n1, int n2, double** upper,
             double** matrix, int T, FILE *fp) {
    int i, j, ok = 1;
    /* Bug fix: use calloc — the accumulation below needs zero-initialized
       cells (the original malloc'd and += into uninitialized memory). */
    double** res_matrix = malloc(m1 * sizeof(double*));
    for (i = 0; i < m1; i++)
        res_matrix[i] = calloc(n2, sizeof(double));

    int ii;
    /* Bug fix: the original wrote "#pragma omp for { ... }", which is
       invalid (the directive must be followed directly by the for loop);
       its explicit barrier was also redundant (worksharing-for ends with an
       implicit one). */
    #pragma omp parallel for num_threads(T)
    for (ii = 0; ii < m1; ii++) {
        for (int jj = 0; jj < n2; jj++)
            for (int kk = 0; kk < n1; kk++)
                res_matrix[ii][jj] += lower[ii][kk] * upper[kk][jj];
    }

    /* Bug fix: compare in floating point — the original cast both sides to
       long long, truncating fractions and defeating the 1e-4 tolerance. */
    for (i = 0; i < m1 && ok; i++)
        for (j = 0; j < n2; j++)
            if (fabs(matrix[i][j] - res_matrix[i][j]) > 0.0001) {
                printNxNMatrix(res_matrix, n, fp);
                ok = 0;
                break;
            }

    /* Bug fix: the original leaked res_matrix. */
    for (i = 0; i < m1; i++)
        free(res_matrix[i]);
    free(res_matrix);
    return ok;
}

/* Factor `matrix` (n-by-n) into unit-lower L and upper U via a row pipeline
   across p OpenMP threads; writes L, U and the verification product to
   text files in the working directory. */
void luDecomposition(double** matrix, int n, int p) {
    double** lower = malloc(n * sizeof(double*));
    double** upper = malloc(n * sizeof(double*));
    for (int i = 0; i < n; i++) {
        lower[i] = malloc(n * sizeof(double));
        upper[i] = malloc(n * sizeof(double));
    }
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            lower[i][j] = matrix[i][j];
            upper[i][j] = matrix[i][j];
        }

    int i, j, k, row;
    omp_set_num_threads(p);

    /* Pipeline: thread 0 eliminates its own block of rows and forwards the
       pivot index; each downstream thread consumes the index, forwards it,
       and updates its own block. */
    #pragma omp parallel private(k, i, j, row) shared(matrix, lower, upper) num_threads(p)
    {
        long threadID = omp_get_thread_num();
        int blockSize = n / p;
        if (threadID != 0) {
            for (k = 0; k < n - 1; k++) {
                row = (int) Get(threadID);
                /* Bug fix: the last stage must not forward — the original
                   unconditionally wrote into buff[threadID + 1], indexing
                   buff[p] out of bounds when p == 4. */
                if (threadID + 1 < p)
                    Put(threadID + 1, row);
                for (i = threadID * blockSize; i < threadID * blockSize + blockSize; i++)
                    if (row < i)
                        lower[i][row] = upper[i][row] / upper[row][row];
                for (i = threadID * blockSize; i < threadID * blockSize + blockSize; i++)
                    for (j = k; j < n; j++)
                        if (row < i)
                            upper[i][j] = upper[i][j] - lower[i][row] * upper[row][j];
            }
        } else {
            for (k = 0; k < n - 1; k++) {
                Put(threadID + 1, k);
                for (i = k + 1; i < threadID * blockSize + blockSize; i++)
                    lower[i][k] = upper[i][k] / upper[k][k];
                for (i = k + 1; i < threadID * blockSize + blockSize; i++)
                    for (j = k + 1; j < n; j++)
                        upper[i][j] = upper[i][j] - lower[i][k] * upper[k][j];
            }
        }
    }

    /* Zero the complementary triangles and set L's unit diagonal. */
    for (int jj = 0; jj < n; jj++) {
        for (int ii = 0; ii < n; ii++) {
            if (ii > jj)
                upper[ii][jj] = 0.;
            else
                lower[ii][jj] = 0.;
        }
        lower[jj][jj] = 1.;
    }

    /* Persist the factors (skip silently if the files cannot be created,
       matching the original's best-effort behavior). */
    FILE *fp = fopen("lower_matrix.txt", "w");
    if (fp != NULL) {
        printNxNMatrix(lower, n, fp);
        fclose(fp);
    }
    fp = fopen("upper_matrix.txt", "w");
    if (fp != NULL) {
        printNxNMatrix(upper, n, fp);
        fclose(fp);
    }

    /* Check correctness: L*U must reproduce the input. */
    fp = fopen("multiplication_result.txt", "w");
    if (fp != NULL) {
        if (multiply(n, n, lower, n, n, upper, matrix, 4, fp))
            printf("Pass the test.\n");
        fclose(fp);
    }

    /* Bug fix: the original leaked both factor matrices. */
    for (int ii = 0; ii < n; ii++) {
        free(lower[ii]);
        free(upper[ii]);
    }
    free(lower);
    free(upper);
}

/* Driver: read n and an n-by-n matrix from file_six_six.txt, then factor. */
int main() {
    FILE *fp = fopen("file_six_six.txt", "r");
    /* Robustness: the original dereferenced a NULL FILE* on a missing file. */
    if (fp == NULL || fscanf(fp, "%i", &n) != 1) {
        fprintf(stderr, "cannot read input matrix\n");
        if (fp != NULL)
            fclose(fp);
        return 1;
    }
    double** matrix = malloc(n * sizeof(double*));
    for (int i = 0; i < n; i++)
        matrix[i] = malloc(n * sizeof(double));
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            if (fscanf(fp, "%lf", &matrix[i][j]) != 1)
                matrix[i][j] = 0.;   /* tolerate short files, as before */
    fclose(fp);

    luDecomposition(matrix, n, 2);

    for (int i = 0; i < n; i++)
        free(matrix[i]);
    free(matrix);
    return 0;
}
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/

#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"

/*
  Typedef declarations.
*/
/* One tristimulus triple (x,y,z weights) of a per-channel lookup row. */
typedef struct _TransformPacket
{
  MagickRealType
    x,
    y,
    z;
} TransformPacket;

/*
  Forward declarations.
*/
static MagickBooleanType
  TransformsRGBImage(Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e C o l o r s p a c e T y p e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageColorspaceType() returns the potential type of image:
%  sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
%  To ensure the image type matches its potential, use SetImageColorspaceType():
%
%    (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
%      exception);
%
%  The format of the GetImageColorspaceType method is:
%
%      ColorspaceType GetImageColorspaceType(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colorspace=image->colorspace;
  /* A visually gray image is reported as GRAY regardless of its nominal
     colorspace. */
  type=IdentifyImageType(image,exception);
  if (IsGrayImageType(type))
    colorspace=GRAYColorspace;
  return(colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   s R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalized the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Each ConvertXxxToYyy helper below maps one pixel between color models,
   generally pivoting through CIE XYZ via the helpers in gem-private.h. */

static inline void ConvertAdobe98ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertDisplayP3ToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

static inline void ConvertProPhotoToRGB(const double r,const double g,
  const double b,double *red,double *green,double *blue)
{
  double
    X,
    Y,
    Z;

  ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/* Subtractive CMY is simply the complement of RGB, scaled to [0,1]. */
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

static void ConvertRGBToAdobe98(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}

static void ConvertRGBToDisplayP3(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToDisplayP3(X,Y,Z,r,g,b);
}

static void ConvertRGBToProPhoto(const double red,const double green,
  const double blue,double *r,double *g,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}

/* XYZ -> LMS cone response (CAT02-style linear transform). */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

static void ConvertRGBToLuv(const double red,const double green,
  const double blue,const IlluminantType illuminant,double *L,double *u,
  double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}

/* Chromaticity coordinates: normalize X,Y by the tristimulus sum. */
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

static void inline ConvertXYZToJzazbz(const double X,const double Y,
  const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b  1.15  /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g  0.66
#define Jzazbz_c1  (3424.0/4096.0)
#define Jzazbz_c2  (2413.0/128.0)
#define Jzazbz_c3  (2392.0/128.0)
#define Jzazbz_n  (2610.0/16384.0)
#define Jzazbz_p  (1.7*2523.0/32.0)
#define Jzazbz_d  (-0.56)
#define Jzazbz_d0  (1.6295499532821566e-11)

  double
    gamma,
    Iz,
    L,
    Lp,
    M,
    Mp,
    S,
    Sp,
    Xp,
    Yp,
    Zp;

  /* Chromatic adaptation, LMS response, PQ-style nonlinearity, then the
     Iz/az/bz opponent axes; az/bz are offset by 0.5 into [0,1]. */
  Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
  Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
  Zp=Z;
  L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
  M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
  S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
  gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
  Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
  Iz=0.5*Lp+0.5*Mp;
  *az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
  *bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
  *Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}

static void inline ConvertJzazbzToXYZ(const double Jz,const double az,
  const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
  double
    azz,
    bzz,
gamma, Iz, L, Lp, M, Mp, S, Sp, Xp, Yp, Zp; gamma=Jz+Jzazbz_d0; Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0); azz=az-0.5; bzz=bz-0.5; Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz; Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz; Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz; gamma=pow(Lp,1.0/Jzazbz_p); L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); gamma=pow(Mp,1.0/Jzazbz_p); M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); gamma=pow(Sp,1.0/Jzazbz_p); S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/ Jzazbz_n); Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S; Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S; Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S; *X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b; *Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g; *Z=Zp; } static void ConvertRGBToJzazbz(const double red,const double green, const double blue,const double white_luminance,double *Jz,double *az, double *bz) { double X, Y, Z; ConvertRGBToXYZ(red,blue,green,&X,&Y,&Z); ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz); } static void ConvertJzazbzToRGB(const double Jz,const double az, const double bz,const double white_luminance,double *red,double *green, double *blue) { double X, Y, Z; ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,blue,green); } static void ConvertRGBToYDbDr(const double red,const double green, const double blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const double red,const double green, const double blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; 
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } static void ConvertRGBToYPbPr(const double red,const double green, const double blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const double red,const double green, const double blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const double red,const double green, const double blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static MagickBooleanType sRGBTransformImage(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } status=MagickTrue; progress=0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158* DecodePixelGamma(GetPixelGreen(image,q))+0.072186* DecodePixelGamma(GetPixelBlue(image,q)); SetPixelGray(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelGray(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case Adobe98Colorspace: case DisplayP3Colorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case JzazbzColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case ProPhotoColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { const char *value; double white_luminance; /* Transform image from sRGB to target colorspace. 
*/ white_luminance=10000.0; value=GetImageProperty(image,"white-luminance",exception); if (value != (const char *) NULL) white_luminance=StringToDouble(value,(char **) NULL); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case Adobe98Colorspace: { ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z); break; } case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case DisplayP3Colorspace: { ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case JzazbzColorspace: { ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z); break; } case LabColorspace: { 
ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z); break; } case ProPhotoColorspace: { ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002* PerceptibleReciprocal(film_gamma)))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) 
GetPixelGreen(image,q)); blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if 
(SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; x_map[i].y=(-0.003296)*i; x_map[i].z=0.009410*i; y_map[i].x=0.010566*i; y_map[i].y=(-0.006471)*i; y_map[i].z=(-0.007880)*i; z_map[i].x=0.002052*i; z_map[i].y=0.009768*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); x_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].x=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; z_map[i].y=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; Quantum *magick_restrict q; ssize_t x; unsigned int blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image,q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image,q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image,q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ primary_info.z; SetPixelRed(image,ScaleMapToQuantum(pixel.red),q); SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q); SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { 
MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,sRGBTransformImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { unsigned int blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red); image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green); image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptiionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageColorspace(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { ImageType type; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->colorspace == colorspace) return(MagickTrue); image->colorspace=colorspace; image->rendering_intent=UndefinedIntent; image->gamma=1.000/2.200; (void) memset(&image->chromaticity,0,sizeof(image->chromaticity)); type=image->type; if (IsGrayColorspace(colorspace) != MagickFalse) { if (colorspace == LinearGRAYColorspace) image->gamma=1.000; type=GrayscaleType; } else if ((IsRGBColorspace(colorspace) != MagickFalse) || (colorspace == XYZColorspace) || (colorspace == xyYColorspace)) image->gamma=1.000; else { image->rendering_intent=PerceptualIntent; image->chromaticity.red_primary.x=0.6400; image->chromaticity.red_primary.y=0.3300; image->chromaticity.red_primary.z=0.0300; image->chromaticity.green_primary.x=0.3000; image->chromaticity.green_primary.y=0.6000; image->chromaticity.green_primary.z=0.1000; image->chromaticity.blue_primary.x=0.1500; image->chromaticity.blue_primary.y=0.0600; image->chromaticity.blue_primary.z=0.7900; image->chromaticity.white_point.x=0.3127; image->chromaticity.white_point.y=0.3290; image->chromaticity.white_point.z=0.3583; } status=SyncImagePixelCache(image,exception); image->type=type; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageGray() returns MagickTrue if all the pixels in the image have the % same red, green, and 
blue intensities and changes the type of the image to % bi-level or grayscale. % % The format of the SetImageGray method is: % % MagickBooleanType SetImageGray(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageGray(Image *image, ExceptionInfo *exception) { const char *value; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsImageGray(image) != MagickFalse) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); value=GetImageProperty(image,"colorspace:auto-grayscale",exception); if (IsStringFalse(value) != MagickFalse) return(MagickFalse); type=IdentifyImageGray(image,exception); if (type == UndefinedType) return(MagickFalse); image->colorspace=GRAYColorspace; if (SyncImagePixelCache((Image *) image,exception) == MagickFalse) return(MagickFalse); image->type=type; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMonochrome() returns MagickTrue if all the pixels in the image have % the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange and changes the type of the image to bi-level. % % The format of the SetImageMonochrome method is: % % MagickBooleanType SetImageMonochrome(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageMonochrome(Image *image, ExceptionInfo *exception) { MagickBooleanType is_bilevel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsImageMonochrome(image) != MagickFalse) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); is_bilevel=IdentifyImageMonochrome(image,exception); if (is_bilevel == MagickFalse) return(MagickFalse); image->colorspace=GRAYColorspace; if (SyncImagePixelCache((Image *) image,exception) == MagickFalse) return(MagickFalse); image->type=BilevelType; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImageColorspace() transforms an image colorspace, changing the % image data to reflect the new colorspace. % % The format of the TransformImageColorspace method is: % % MagickBooleanType TransformImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType TransformImageColorspace(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == colorspace) return(SetImageColorspace(image,colorspace,exception)); (void) DeleteImageProfile(image,"icc"); (void) DeleteImageProfile(image,"icm"); if (colorspace == UndefinedColorspace) return(SetImageColorspace(image,colorspace,exception)); /* Convert the reference image from an alternate colorspace to sRGB. */ if (IssRGBColorspace(colorspace) != MagickFalse) return(TransformsRGBImage(image,exception)); status=MagickTrue; if (IssRGBColorspace(image->colorspace) == MagickFalse) status=TransformsRGBImage(image,exception); if (status == MagickFalse) return(status); /* Convert the reference image from sRGB to an alternate colorspace. */ if (sRGBTransformImage(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a n s f o r m s R G B I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformsRGBImage() converts the reference image from an alternate % colorspace to sRGB. The transformation matrices are not the standard ones: % the weights are rescaled to normalize the range of the transformed values % to be [0..QuantumRange]. % % The format of the TransformsRGBImage method is: % % MagickBooleanType TransformsRGBImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,double *red,double *green,double *blue) { *red=QuantumRange*(1.0-cyan); *green=QuantumRange*(1.0-magenta); *blue=QuantumRange*(1.0-yellow); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,double *red,double *green,double *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,const IlluminantType illuminant,double *red,double *green, double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,illuminant,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return(0); if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,const IlluminantType illuminant,double *red,double *green, double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),illuminant,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,double *red,double *green,double *blue) { double gamma, X, Y, Z; gamma=PerceptibleReciprocal(low_y); X=gamma*cap_Y*low_x; Y=cap_Y; Z=gamma*cap_Y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, double *red,double *green,double *blue) { *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+ 
1.4019995886561440468*(Pr-0.5)); *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 
0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 
0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 
0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 
0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 
0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 
0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 
0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158* EncodePixelGamma(GetPixelGreen(image,q))+0.072186* EncodePixelGamma(GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case Adobe98Colorspace: case CMYColorspace: case DisplayP3Colorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case JzazbzColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case ProPhotoColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { const char *value; double white_luminance; /* Transform image from source colorspace to sRGB. 
*/ white_luminance=10000.0; value=GetImageProperty(image,"white-luminance",exception); if (value != (const char *) NULL) white_luminance=StringToDouble(value,(char **) NULL); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case Adobe98Colorspace: { ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue); break; } case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case DisplayP3Colorspace: { ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case JzazbzColorspace: { ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue); break; } case LabColorspace: { 
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue); break; } case ProPhotoColorspace: { ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma)); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002* PerceptibleReciprocal(film_gamma))-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; 
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); 
SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransformsRGBImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. 
*/ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
opencl_dmg_fmt_plug.c
/*
 * This software is Copyright (c) 2017, magnum
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Debug levels:
 * 1   show what "test" hits
 * 2   dump printables from the decrypted blocks
 * 3   dump hex from the decrypted blocks
 * 4   dump decrypted blocks to files (will overwrite with no mercy):
 *       dmg.debug.main   main block
 *       dmg.debug        alternate block (if present, this is the start block)
 */
//#define DMG_DEBUG 2

#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_dmg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_dmg);
#else

#include <stdint.h>
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef DMG_DEBUG
#define NEED_OS_FLOCK
#include "os.h"
#endif
#include "arch.h"
#include "aes.h"
#include "hmac_sha.h"
#include "formats.h"
#include "common.h"
#include "options.h"
#include "jumbo.h"
#include "loader.h"
#include "dmg_common.h"
#include "common-opencl.h"

/* OUTLEN must be defined before including the PBKDF2 kernel header. */
#define OUTLEN 32
#include "opencl_pbkdf2_hmac_sha1.h"

#define FORMAT_LABEL		"dmg-opencl"
#define FORMAT_NAME		"Apple DMG"
#define FORMAT_TAG		"$dmg$"
#define FORMAT_TAG_LEN		(sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME		"PBKDF2-SHA1 OpenCL 3DES/AES"
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1001
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1
#define BINARY_SIZE		0
#define BINARY_ALIGN		1
#define PLAINTEXT_LENGTH	64
#define SALT_SIZE		sizeof(struct custom_salt)
#define SALT_ALIGN		sizeof(uint32_t)

/* Big-endian byte-swap of a 32-bit value (only used for the "koly" check). */
#undef HTONL
#define HTONL(n) (((((unsigned long)(n) & 0xFF)) << 24) | \
                  ((((unsigned long)(n) & 0xFF00)) << 8) | \
                  ((((unsigned long)(n) & 0xFF0000)) >> 8) | \
                  ((((unsigned long)(n) & 0xFF000000)) >> 24))

#ifdef DMG_DEBUG
extern volatile int bench_running;
#endif

/*
 * Byte position of key byte i for candidate 'index' in the interleaved
 * on-GPU key buffer.  This handles all widths (ocl_v_width), including the
 * scalar case.
 */
#define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width)

/* Per-candidate crack flags, plus a quick "anything cracked?" summary. */
static int *cracked;
static int any_cracked;

/* One parsed "$dmg$..." ciphertext; layout depends on headerver (1 or 2). */
static struct custom_salt {
	unsigned int saltlen;
	unsigned char salt[20];
	unsigned int ivlen;
	unsigned char iv[32];
	int headerver;
	unsigned char chunk[8192];
	uint32_t encrypted_keyblob_size;
	uint8_t encrypted_keyblob[128];
	unsigned int len_wrapped_aes_key;
	unsigned char wrapped_aes_key[296];
	unsigned int len_hmac_sha1_key;
	unsigned char wrapped_hmac_sha1_key[300];
	char scp; /* start chunk present */
	unsigned char zchunk[4096]; /* chunk #0 */
	int cno;
	int data_size;
	unsigned int iterations;
} *cur_salt;

static size_t key_buf_size;          /* bytes in the candidate-key host buffer */
static unsigned int *inbuffer;       /* host-side candidate keys (GETPOS layout) */
static pbkdf2_out *output;           /* derived keys read back from the GPU */
static pbkdf2_salt currentsalt;      /* salt as transferred to the GPU */
static cl_mem mem_in, mem_out, mem_salt, mem_state;
static int new_keys;                 /* set_key() was called since last transfer */
static struct fmt_main *self;

static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final;

size_t insize, outsize, settingsize;

#define cracked_size (sizeof(*cracked) * global_work_size * ocl_v_width)

/*
 * HASH_LOOPS is ideally made by factors of (iteration count - 1) and should
 * be chosen for a kernel duration of not more than 200 ms
 */
#define HASH_LOOPS	(3 * 251)
#define LOOP_COUNT	(((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS)
#define STEP		0
#define SEED		128

/* Progress labels for the autotuner, one per profiling event below. */
static const char * warn[] = {
	"P xfer: ", ", init: ", ", loop: ", ", final: ", ", res xfer: "
};

/* Event index 2 (the loop kernel) is the split/tunable one. */
static int split_events[] = { 2, -1, -1 };

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */

/*
 * Upper bound for the local work-group size: the smallest maximum over the
 * three kernels, since all are enqueued with the same LWS.
 */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

/*
 * Allocate host and device buffers for the given global work size and bind
 * all kernel arguments.  gws is scaled up by the vector width first.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;
	key_buf_size = PLAINTEXT_LENGTH * gws;

	/// Allocate memory
	inbuffer = mem_calloc(1, key_buf_size);
	output = mem_alloc(sizeof(pbkdf2_out) * gws);
	cracked = mem_calloc(1, cracked_size);

	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}

/*
 * Free everything create_clobj() allocated; 'cracked' doubles as the
 * "buffers exist" guard.
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(output);
		MEM_FREE(cracked);
	}
}

/* Release buffers, kernels and program once autotuning has happened. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* One-time format init: pick a vector width and advertise it in the name. */
static void init(struct fmt_main *_self)
{
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;
	opencl_prepare_dev(gpu_id);
	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel */
		snprintf(valgo, sizeof(valgo), ALGORITHM_NAME " %ux", ocl_v_width);
		self->params.algorithm_name = valgo;
	}
}

/*
 * Build the PBKDF2 kernels (sized for the most costly salt in the database)
 * and run the autotuner.  Only done once.
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		int iterations = 1000;
		char build_opts[64];

		if (db->real) {
			struct db_salt *s = db->real->salts;
			void *salt;

			/* Find the salt with the highest cost (iteration count). */
			while (s->next && s->cost[0] < db->max_cost[0])
				s = s->next;
			salt = s->salt;
			iterations = ((struct custom_salt*)salt)->iterations;
		}

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		//Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2*HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, ocl_v_width * sizeof(pbkdf2_state), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 2 * (iterations - 1) + 4, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 200));
	}
}

/*
 * Validate one "$dmg$..." ciphertext line: tag, header version (1 or 2),
 * and each '*'-separated field's decimal/hex form and length bound, so
 * get_salt() can parse it without further checks.  Returns 1 if valid.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	int headerver;
	int res, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$dmg$" marker */
	if ((p = strtokm(ctcopy, "*")) == NULL)
		goto err;
	headerver = atoi(p);
	if (headerver == 2) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)	/* bound of custom_salt.salt[] */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p, &extra) != res*2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* ivlen */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (atoi(p) > 32)	/* bound of custom_salt.iv[] */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
			goto err;
		if (hexlenl(p, &extra) != res*2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted_keyblob_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 128)	/* bound of custom_salt.encrypted_keyblob[] */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* encrypted keyblob */
			goto err;
		if (hexlenl(p, &extra) != res*2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk number */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* data_size */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if ((p = strtokm(NULL, "*")) == NULL)	/* chunk */
			goto err;
		if (hexlenl(p, &extra) != res*2 || extra)
			goto err;
		if (res > 8192)	/* bound of custom_salt.chunk[] */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* scp */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		/* FIXME: which values are allowed here? */
		if (res == 1) {
			if ((p = strtokm(NULL, "*")) == NULL)	/* zchunk */
				goto err;
			if (strlen(p) != 4096 * 2)
				goto err;
		}
	} else if (headerver == 1) {
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt len */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 20)	/* bound of custom_salt.salt[] */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
			goto err;
		if (hexlenl(p, &extra) != res*2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_wrapped_aes_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 296)	/* bound of custom_salt.wrapped_aes_key[] */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* wrapped_aes_key */
			goto err;
		if (hexlenl(p, &extra) != res*2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* len_hmac_sha1_key */
			goto err;
		if (!isdec(p))
			goto err;
		res = atoi(p);
		if (res > 300)	/* bound of custom_salt.wrapped_hmac_sha1_key[] */
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL)	/* hmac_sha1_key */
			goto err;
		if (strlen(p) / 2 != res)
			goto err;
	} else
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse a (pre-validated) ciphertext into a static custom_salt.  Hex fields
 * are decoded with the atoi16[] nibble table.  A trailing optional field is
 * the iteration count; it defaults to 1000 when absent or zero.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "*");
	cs.headerver = atoi(p);
	if (cs.headerver == 2) {
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.ivlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.ivlen; i++)
			cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.encrypted_keyblob_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.encrypted_keyblob_size; i++)
			cs.encrypted_keyblob[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.cno = atoi(p);
		p = strtokm(NULL, "*");
		cs.data_size = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.data_size; i++)
			cs.chunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.scp = atoi(p);
		if (cs.scp == 1) {
			p = strtokm(NULL, "*");
			for (i = 0; i < 4096; i++)
				cs.zchunk[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		}
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	} else {
		p = strtokm(NULL, "*");
		cs.saltlen = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.saltlen; i++)
			cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_wrapped_aes_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_wrapped_aes_key; i++)
			cs.wrapped_aes_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		p = strtokm(NULL, "*");
		cs.len_hmac_sha1_key = atoi(p);
		p = strtokm(NULL, "*");
		for (i = 0; i < cs.len_hmac_sha1_key; i++)
			cs.wrapped_hmac_sha1_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		if ((p = strtokm(NULL, "*")))
			cs.iterations = atoi(p);
		else
			cs.iterations = 1000;
	}
	if (cs.iterations == 0)
		cs.iterations = 1000;
	MEM_FREE(keeptr);
	return (void*)&cs;
}

/* Make 'salt' current and push it to the device (async write). */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, 20);
	currentsalt.length = 20;
	currentsalt.outlen = 32;
	currentsalt.iterations = cur_salt->iterations;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL), "Copy salt to gpu");
}

/* Zero the whole host key buffer (keys are not NUL-terminated in it). */
static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

#undef set_key
/* Scatter one candidate key into the interleaved buffer (see GETPOS). */
static void set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;
}

/* Gather a candidate key back out of the interleaved buffer. */
static char* get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH && (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

/*
 * Apple's v1 keyblob unwrap: two 3DES-EDE-CBC decryptions (the second over
 * the byte-reversed first result) with PKCS padding checks at each stage.
 * Returns 1 if both paddings verify (i.e. the derived key is plausible).
 */
static int apple_des3_ede_unwrap_key1(const unsigned char *wrapped_key, const int wrapped_key_len, const unsigned char *decryptKey)
{
	DES_key_schedule ks1, ks2, ks3;
	unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	unsigned char TEMP2[sizeof(cur_salt->wrapped_hmac_sha1_key)];
	/* Fixed IV from Apple's key-wrap scheme. */
	unsigned char IV[8] = { 0x4a, 0xdd, 0xa2, 0x2c, 0x79, 0xe8, 0x21, 0x05 };
	int outlen, i;

	DES_set_key((DES_cblock*)(decryptKey + 0), &ks1);
	DES_set_key((DES_cblock*)(decryptKey + 8), &ks2);
	DES_set_key((DES_cblock*)(decryptKey + 16), &ks3);
	DES_ede3_cbc_encrypt(wrapped_key, TEMP1, wrapped_key_len, &ks1, &ks2, &ks3, (DES_cblock*)IV, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, wrapped_key_len, 8);
	if (outlen < 0)
		return 0;

	/* Reverse the plaintext, then decrypt again with its head as IV. */
	for (i = 0; i < outlen; i++)
		TEMP2[i] = TEMP1[outlen - i - 1];

	outlen -= 8;
	DES_ede3_cbc_encrypt(TEMP2 + 8, TEMP1, outlen, &ks1, &ks2, &ks3, (DES_cblock*)TEMP2, DES_DECRYPT);
	outlen = check_pkcs_pad(TEMP1, outlen, 8);
	if (outlen < 0)
		return 0;

	return 1;
}

/*
 * Test one PBKDF2-derived key against the current salt.  v1 headers use the
 * 3DES key-unwrap test; v2 headers decrypt the keyblob, then a data chunk,
 * and look for plaintext signatures (primarily 8 consecutive NUL bytes).
 * Returns 1 on a (probable) hit.
 */
static int hash_plugin_check_hash(unsigned char *derived_key)
{
	unsigned char hmacsha1_key_[20];
	unsigned char aes_key_[32];
	int ret = 0;

	if (cur_salt->headerver == 1) {
		if (apple_des3_ede_unwrap_key1(cur_salt->wrapped_aes_key, cur_salt->len_wrapped_aes_key, derived_key) && apple_des3_ede_unwrap_key1(cur_salt->wrapped_hmac_sha1_key, cur_salt->len_hmac_sha1_key, derived_key)) {
			return 1;
		}
	} else {
		DES_key_schedule ks1, ks2, ks3;
		unsigned char TEMP1[sizeof(cur_salt->wrapped_hmac_sha1_key)];
		AES_KEY aes_decrypt_key;
		unsigned char outbuf[8192 + 1];
		unsigned char outbuf2[4096 + 1];
		unsigned char iv[20];
#ifdef DMG_DEBUG
		unsigned char *r;
#endif
		const char nulls[8] = { 0 };

		/* Unwrap the keyblob with 3DES to recover AES and HMAC keys. */
		DES_set_key((DES_cblock*)(derived_key + 0), &ks1);
		DES_set_key((DES_cblock*)(derived_key + 8), &ks2);
		DES_set_key((DES_cblock*)(derived_key + 16), &ks3);
		memcpy(iv, cur_salt->iv, 8);
		DES_ede3_cbc_encrypt(cur_salt->encrypted_keyblob, TEMP1, cur_salt->encrypted_keyblob_size, &ks1, &ks2, &ks3, (DES_cblock*)iv, DES_DECRYPT);

		memcpy(aes_key_, TEMP1, 32);
		memcpy(hmacsha1_key_, TEMP1, 20);
		/* Per-chunk IV = HMAC-SHA1(key, chunk number). */
		hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cur_salt->cno, 4, iv, 20);
		/* A 48-byte keyblob means AES-128, otherwise AES-256. */
		if (cur_salt->encrypted_keyblob_size == 48)
			AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
		else
			AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
		AES_cbc_encrypt(cur_salt->chunk, outbuf, cur_salt->data_size, &aes_decrypt_key, iv, AES_DECRYPT);

		/* 8 consecutive nulls */
		if (memmem(outbuf, cur_salt->data_size, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
			if (!bench_running)
				fprintf(stderr, "NULLS found!\n\n");
#endif
			ret = 1;
		}

/* These tests seem to be obsoleted by the 8xNULL test */
#ifdef DMG_DEBUG
		/* </plist> is a pretty generic signature for Apple */
		if (memmem(outbuf, cur_salt->data_size, (void*)"</plist>", 8)) {
			if (!bench_running)
				fprintf(stderr, "</plist> found!\n\n");
			ret = 1;
		}
		/* Journalled HFS+ */
		if (memmem(outbuf, cur_salt->data_size, (void*)"jrnlhfs+", 8)) {
			if (!bench_running)
				fprintf(stderr, "jrnlhfs+ found!\n\n");
			ret = 1;
		}
		/* Handle compressed DMG files, CMIYC 2012 and self-made samples.
		   Is this test obsoleted by the </plist> one? */
		if ((r = memmem(outbuf, cur_salt->data_size, (void*)"koly", 4))) {
			unsigned int *u32Version = (unsigned int*)(r + 4);

			if (HTONL(*u32Version) == 4) {
				if (!bench_running)
					fprintf(stderr, "koly found!\n\n");
				ret = 1;
			}
		}
		/* Handle VileFault sample images */
		if (memmem(outbuf, cur_salt->data_size, (void*)"EFI PART", 8)) {
			if (!bench_running)
				fprintf(stderr, "EFI PART found!\n\n");
			ret = 1;
		}
		/* Apple is a good indication but it's short enough to produce false
		   positives */
		if (memmem(outbuf, cur_salt->data_size, (void*)"Apple", 5)) {
			if (!bench_running)
				fprintf(stderr, "Apple found!\n\n");
			ret = 1;
		}
#endif /* DMG_DEBUG */

		/* Second buffer test. If present, *this* is the very first block of
		   the DMG */
		if (cur_salt->scp == 1) {
			int cno = 0;

			hmac_sha1(hmacsha1_key_, 20, (unsigned char*)&cno, 4, iv, 20);
			if (cur_salt->encrypted_keyblob_size == 48)
				AES_set_decrypt_key(aes_key_, 128, &aes_decrypt_key);
			else
				AES_set_decrypt_key(aes_key_, 128 * 2, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->zchunk, outbuf2, 4096, &aes_decrypt_key, iv, AES_DECRYPT);

			/* 8 consecutive nulls */
			if (memmem(outbuf2, 4096, (void*)nulls, 8)) {
#ifdef DMG_DEBUG
				if (!bench_running)
					fprintf(stderr, "NULLS found in alternate block!\n\n");
#endif
				ret = 1;
			}
#ifdef DMG_DEBUG
			/* This test seem to be obsoleted by the 8xNULL test */
			if (memmem(outbuf2, 4096, (void*)"Press any key to reboot", 23)) {
				if (!bench_running)
					fprintf(stderr, "MS-DOS UDRW signature found in alternate block!\n\n");
				ret = 1;
			}
#endif /* DMG_DEBUG */
		}

#ifdef DMG_DEBUG
		/* Write block as hex, strings or raw to a file. */
		if (ret && !bench_running) {
#if DMG_DEBUG == 4
			int fd;

			if ((fd = open("dmg.debug.main", O_RDWR | O_CREAT | O_TRUNC, 0660)) == -1)
				perror("open()");
			else {
#if FCNTL_LOCKS
				struct flock lock = { 0 };

				lock.l_type = F_WRLCK;
				while (fcntl(fd, F_SETLKW, &lock)) {
					if (errno != EINTR)
						pexit("fcntl(F_WRLCK)");
				}
#elif OS_FLOCK
				while (flock(fd, LOCK_EX)) {
					if (errno != EINTR)
						pexit("flock(LOCK_EX)");
				}
#endif
				if ((write(fd, outbuf, cur_salt->data_size) == -1))
					perror("write()");
				if (cur_salt->scp == 1)
					if ((write(fd, outbuf2, 4096) == -1))
						perror("write()");
				if (close(fd))
					perror("close");
			}
#endif
#if DMG_DEBUG == 3
			dump_stuff(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_stuff(outbuf2, 4096);
			}
#endif
#if DMG_DEBUG == 2
			dump_text(outbuf, cur_salt->data_size);
			if (cur_salt->scp == 1) {
				fprintf(stderr, "2nd block:\n");
				dump_text(outbuf2, 4096);
			}
#endif
		}
#endif /* DMG_DEBUG */
	}

	return ret;
}

/*
 * Run PBKDF2 on the GPU for 'count' candidates (init, HASH_LOOPS-sized loop
 * slices, final — repeated per 20-byte output block), read the derived keys
 * back, then check each on the CPU (OpenMP-parallel).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i, j, index;
	size_t scalar_gws;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = global_work_size * ocl_v_width;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
		new_keys = 0;
	}

	/// Run kernels
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");

	for (j = 0; j < (ocl_autotune_running ? 1 : (currentsalt.outlen + 19) / 20); j++) {
		for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
			BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
			BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
			opencl_process_event();
		}

		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");
	}

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[4]), "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (hash_plugin_check_hash((unsigned char*)output[index].dk) == 1) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}

	return count;
}

/* Any candidate cracked in the last crypt_all()? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

/* Was candidate 'index' cracked? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* The plugin check is the final test; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Tunable-cost reporting hook: PBKDF2 iteration count of a salt. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations;
}

/* Format descriptor: parameters and method table registered with JtR. */
struct fmt_main fmt_opencl_dmg = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef DMG_DEBUG
		FMT_NOT_EXACT |
#endif
		FMT_CASE | FMT_8_BIT | FMT_HUGE_INPUT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		dmg_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
GB_binop__isge_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__isge_int32
// A.*B function (eWiseMult):       GB_AemultB__isge_int32
// A*D function (colscale):         GB_AxD__isge_int32
// D*A function (rowscale):         GB_DxB__isge_int32
// C+=B function (dense accum):     GB_Cdense_accumB__isge_int32
// C+=b function (dense accum):     GB_Cdense_accumb__isge_int32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__isge_int32
// C=scalar+B                       GB_bind1st__isge_int32
// C=scalar+B'                      GB_bind1st_tran__isge_int32
// C=A+scalar                       GB_bind2nd__isge_int32
// C=A'+scalar                      GB_bind2nd_tran__isge_int32

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x >= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_INT32 || GxB_NO_ISGE_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isge_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isge_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isge_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isge_int32 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isge_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isge_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isge_int32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isge_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isge_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] 
; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__isge_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__isge_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===================== frozen_soil.c (next concatenated file) ===================== */
/******************************************************************************
 * @section DESCRIPTION
 *
 * This subroutine redistributes soil properties based on the thermal solutions
 * found for the current time step.
 *****************************************************************************/

#include <vic_run.h>

/******************************************************************************
 * @brief    This subroutine redistributes soil properties based on the thermal
 *           solutions found for the current time step.
 *
 * @param    energy    energy balance state; T[] and frost fields are updated
 * @param    layer     per-layer soil state; temperatures/ice are updated
 * @param    soil_con  static soil parameters
 * @param    Nnodes    number of thermal nodes in T
 * @param    T         node temperatures from the thermal solver
 * @return   0 on success, ERROR on failure
 *
 * FIX: tmpT/tmpZ were previously leaked on every early ERROR return (the
 * frees at the bottom were skipped); they are now released on all paths.
 *****************************************************************************/
int
calc_layer_average_thermal_props(energy_bal_struct *energy,
                                 layer_data_struct *layer,
                                 soil_con_struct   *soil_con,
                                 size_t             Nnodes,
                                 double            *T)
{
    extern option_struct options;

    size_t               i;
    int                  ErrorFlag;
    size_t               tmpTshape[] = {
        options.Nlayer, Nnodes, options.Nfrost + 1
    };
    size_t               tmpZshape[] = {
        options.Nlayer, Nnodes
    };
    double            ***tmpT;
    double             **tmpZ;

    // allocate memory for tmpT and tmpZ
    malloc_3d_double(tmpTshape, &tmpT);
    malloc_2d_double(tmpZshape, &tmpZ);

    ErrorFlag = 0;

    if (options.FROZEN_SOIL && soil_con->FS_ACTIVE) {
        find_0_degree_fronts(energy, soil_con->Zsum_node, T, Nnodes);
    }
    else {
        energy->Nfrost = 0;
    }

    /** Store Layer Temperature Values **/
    for (i = 0; i < Nnodes; i++) {
        energy->T[i] = T[i];
    }

    energy->frozen = (energy->Nfrost > 0);

    /** Compute Soil Layer average properties **/
    if (options.QUICK_FLUX) {
        ErrorFlag = estimate_layer_temperature_quick_flux(layer,
                                                          soil_con->depth,
                                                          soil_con->dp,
                                                          energy->T[0],
                                                          energy->T[1],
                                                          soil_con->avg_temp);
        if (ErrorFlag != ERROR) {
            ErrorFlag = estimate_layer_ice_content_quick_flux(
                layer, soil_con->depth, soil_con->max_moist, soil_con->expt,
                soil_con->bubble, soil_con->frost_fract, soil_con->frost_slope,
                soil_con->FS_ACTIVE);
        }
    }
    else {
        estimate_frost_temperature_and_depth(tmpT, tmpZ, soil_con->Zsum_node,
                                             energy->T, soil_con->depth,
                                             soil_con->frost_fract,
                                             soil_con->frost_slope,
                                             Nnodes, options.Nlayer);
        ErrorFlag = estimate_layer_temperature(layer, tmpT, tmpZ,
                                               soil_con->Zsum_node,
                                               soil_con->depth,
                                               Nnodes, options.Nlayer);
        if (ErrorFlag != ERROR) {
            ErrorFlag = estimate_layer_ice_content(layer, tmpT, tmpZ,
                                                   soil_con->Zsum_node,
                                                   soil_con->depth,
                                                   soil_con->max_moist,
                                                   soil_con->expt,
                                                   soil_con->bubble,
                                                   Nnodes, options.Nlayer,
                                                   soil_con->FS_ACTIVE);
        }
    }

    // free memory for tmpT and tmpZ on ALL exit paths (was leaked on error)
    free_3d_double(tmpTshape, tmpT);
    free_2d_double(tmpZshape, tmpZ);

    if (ErrorFlag == ERROR) {
        return (ERROR);
    }

    return (0);
}

/******************************************************************************
 * @brief    Iteratively solve the soil temperature profile using a numerical
 *           difference equation.  The solution equation is second order in
 *           space, and first order in time.
 *
 * On the first call (FIRST_SOLN[0] set) the finite-difference coefficient
 * arrays A..E are precomputed; they are static (threadprivate) so later
 * calls reuse them.  The actual iteration is done by
 * calc_soil_thermal_fluxes().
 *****************************************************************************/
int
solve_T_profile(double   *T,
                double   *T0,
                char     *Tfbflag,
                unsigned *Tfbcount,
                double   *Zsum,
                double   *kappa,
                double   *Cs,
                double   *moist,
                double    deltat,
                double   *max_moist,
                double   *bubble,
                double   *expt,
                double   *ice,
                double   *alpha,
                double   *beta,
                double   *gamma,
                double    Dp,
                int       Nnodes,
                int      *FIRST_SOLN,
                int       FS_ACTIVE,
                int       NOFLUX,
                int       EXP_TRANS)
{
    double *aa, *bb, *cc, *dd, *ee, Bexp;
    int     Error;
    int     j;

    // TODO: remove use of static variables (see GH #735), for now:
    // make static variables thread safe
    static double A[MAX_NODES];
    static double B[MAX_NODES];
    static double C[MAX_NODES];
    static double D[MAX_NODES];
    static double E[MAX_NODES];
    #pragma omp threadprivate(A, B, C, D, E)

    if (FIRST_SOLN[0]) {
        if (EXP_TRANS) {
            // NOTE(review): logf() is the single-precision log applied to a
            // double argument; log() would preserve precision — confirm
            // against upstream before changing, as it perturbs results.
            Bexp = logf(Dp + 1.) / (double) (Nnodes - 1);
        }
        FIRST_SOLN[0] = false;
        if (!EXP_TRANS) {
            // linear-grid finite-difference coefficients
            for (j = 1; j < Nnodes - 1; j++) {
                A[j] = Cs[j] * alpha[j - 1] * alpha[j - 1];
                B[j] = (kappa[j + 1] - kappa[j - 1]) * deltat;
                C[j] = 2 * deltat * kappa[j] * alpha[j - 1] / gamma[j - 1];
                D[j] = 2 * deltat * kappa[j] * alpha[j - 1] / beta[j - 1];
                E[j] = CONST_RHOICE * CONST_LATICE * alpha[j - 1] *
                       alpha[j - 1];
            }
            if (NOFLUX) {
                // bottom node uses a one-sided kappa difference
                j = Nnodes - 1;
                A[j] = Cs[j] * alpha[j - 1] * alpha[j - 1];
                B[j] = (kappa[j] - kappa[j - 1]) * deltat;
                C[j] = 2 * deltat * kappa[j] * alpha[j - 1] / gamma[j - 1];
                D[j] = 2 * deltat * kappa[j] * alpha[j - 1] / beta[j - 1];
                E[j] = CONST_RHOICE * CONST_LATICE * alpha[j - 1] *
                       alpha[j - 1];
            }
        }
        else {
            // grid transformation terms
            for (j = 1; j < Nnodes - 1; j++) {
                A[j] = 4 * Bexp * Bexp * Cs[j] * (Zsum[j] + 1) * (Zsum[j] + 1);
                B[j] = (kappa[j + 1] - kappa[j - 1]) * deltat;
                C[j] = 4 * deltat * kappa[j];
                D[j] = 2 * deltat * kappa[j] * Bexp;
                E[j] = 4 * Bexp * Bexp * CONST_RHOICE * CONST_LATICE *
                       (Zsum[j] + 1) * (Zsum[j] + 1);
            }
            if (NOFLUX) {
                j = Nnodes - 1;
                A[j] = 4 * Bexp * Bexp * Cs[j] * (Zsum[j] + 1) * (Zsum[j] + 1);
                B[j] = (kappa[j] - kappa[j - 1]) * deltat;
                C[j] = 4 * deltat * kappa[j];
                D[j] = 2 * deltat * kappa[j] * Bexp;
                E[j] = 4 * Bexp * Bexp * CONST_RHOICE * CONST_LATICE *
                       (Zsum[j] + 1) * (Zsum[j] + 1);
            }
        }
    }

    aa = &A[0];
    bb = &B[0];
    cc = &C[0];
    dd = &D[0];
    ee = &E[0];

    // start iteration from the previous time step's profile
    for (j = 0; j < Nnodes; j++) {
        T[j] = T0[j];
    }

    Error = calc_soil_thermal_fluxes(Nnodes, T, T0, Tfbflag, Tfbcount, moist,
                                     max_moist, ice, bubble, expt, gamma, aa,
                                     bb, cc, dd, ee, FS_ACTIVE, NOFLUX,
                                     EXP_TRANS);

    return (Error);
}

/******************************************************************************
 * @brief    Iteratively solve the soil temperature profile using a numerical
 *           difference equation.  The solution equation is second order in
 *           space, and first order in time.
 *****************************************************************************/
int
solve_T_profile_implicit(double   *T,            // update
                         double   *T0,           // keep
                         char     *Tfbflag,
                         unsigned *Tfbcount,
                         double   *Zsum,         // soil parameter
                         double   *kappa,        // update if necessary
                         double   *Cs,           // update if necessary
                         double   *moist,        // keep
                         double    deltat,       // model parameter
                         double   *max_moist,    // soil parameter
                         double   *bubble,       // soil parameter
                         double   *expt,         // soil parameter
                         double   *ice,          // update if necessary
                         double   *alpha,        // soil parameter
                         double   *beta,         // soil parameter
                         double   *gamma,        // soil parameter
                         double    Dp,           // soil parameter
                         int       Nnodes,       // model parameter
                         int      *FIRST_SOLN,   // update
                         int       NOFLUX,
                         int       EXP_TRANS,
                         double   *bulk_dens_min, // soil parameter
                         double   *soil_dens_min, // soil parameter
                         double   *quartz,        // soil parameter
                         double   *bulk_density,  // soil parameter
                         double   *soil_density,  // soil parameter
                         double   *organic,       // soil parameter
                         double   *depth)         // soil parameter
{
    extern option_struct options;

    int    n, Error;
    double res[MAX_NODES];
    void   (*vecfunc)(double *, double *, int, int, ...);
    int    j;

    if (FIRST_SOLN[0]) {
        FIRST_SOLN[0] = false;
    }

    // initialize fda_heat_eqn:
    // pass model parameters, initial states, and soil parameters
    // it MUST be initialized before Newton-Raphson searching
    // n = number of interior unknowns (bottom node included only if NOFLUX)
    if (!NOFLUX) {
        n = Nnodes - 2;
    }
    else {
        n = Nnodes - 1;
    }
    fda_heat_eqn(&T[1], res, n, 1, deltat, NOFLUX, EXP_TRANS, T0, moist, ice,
                 kappa, Cs, max_moist, bubble, expt, alpha, beta, gamma, Zsum,
                 Dp, bulk_dens_min, soil_dens_min, quartz, bulk_density,
                 soil_density, organic, depth, options.Nlayer);

    // modified Newton-Raphson to solve for new T
    vecfunc = &(fda_heat_eqn);
    Error = newt_raph(vecfunc, &T[1], n);

    // update temperature boundaries
    if (Error == 0) {
        T[0] = T0[0];                        // surface (Dirichlet boundary)
        if (!NOFLUX) {
            T[Nnodes - 1] = T0[Nnodes - 1];  // bottom boundary
        }
        if (options.TFALLBACK) {
            // HACK to prevent runaway cold nose
            // Handle the case in which the a node was colder than both the
            // nodes above and below in the last time step, and that both
            // differences have increased between the last time step and the
            // current one.
            for (j = 1; j < Nnodes - 1; j++) {
                if ((T0[j - 1] - T0[j] > 0 && T0[j + 1] - T0[j] > 0 &&
                     (T[j - 1] - T[j]) - (T0[j - 1] - T0[j]) > 0 &&
                     (T[j + 1] - T[j]) - (T0[j + 1] - T0[j]) > 0) ||
                    (T0[j - 1] - T0[j] < 0 && T0[j + 1] - T0[j] < 0 &&
                     (T[j - 1] - T[j]) - (T0[j - 1] - T0[j]) < 0 &&
                     (T[j + 1] - T[j]) - (T0[j + 1] - T0[j]) < 0)) {
                    // crude fix for now; just average the T's without taking
                    // distance, conductivities into account
                    T[j] = 0.5 * (T[j - 1] + T[j + 1]);
                    Tfbflag[j] = 1;
                    Tfbcount[j]++;
                }
            }
        }
    }

    return (Error);
}

/******************************************************************************
 * @brief    Calculate soil thermal fluxes
 *
 * Gauss-Seidel iteration on the node temperatures: each sweep updates T[]
 * in place using the precomputed coefficients A..E, until the largest node
 * change is below `threshold` or FROZEN_MAXITER sweeps have been done.
 *****************************************************************************/
int
calc_soil_thermal_fluxes(int       Nnodes,
                         double   *T,
                         double   *T0,
                         char     *Tfbflag,
                         unsigned *Tfbcount,
                         double   *moist,
                         double   *max_moist,
                         double   *ice,
                         double   *bubble,
                         double   *expt,
                         double   *gamma,
                         double   *A,
                         double   *B,
                         double   *C,
                         double   *D,
                         double   *E,
                         int       FS_ACTIVE,
                         int       NOFLUX,
                         int       EXP_TRANS)
{
    extern option_struct     options;
    extern parameters_struct param;

    int    Error;
    char   Done;
    int    j;
    int    ItCount;
    double threshold = 1.e-2;   /* temperature profile iteration threshold */
    double maxdiff;
    double diff;
    double oldT;
    double Tlast[MAX_NODES];

    Error = 0;
    Done = false;
    ItCount = 0;

    /* initialize Tlast */
    for (j = 0; j < Nnodes; j++) {
        Tlast[j] = T[j];
    }

    /* initialize Tfbflag, Tfbcount */
    for (j = 0; j < Nnodes; j++) {
        Tfbflag[j] = 0;
        Tfbcount[j] = 0;
    }

    while (!Done && Error == 0 && ItCount < param.FROZEN_MAXITER) {
        ItCount++;
        maxdiff = threshold;
        for (j = 1; j < Nnodes - 1; j++) {
            oldT = T[j];
            /** 2nd order variable kappa equation **/
            if (T[j] >= 0 || !FS_ACTIVE || !options.FROZEN_SOIL) {
                // unfrozen (or frozen soil disabled): closed-form update
                if (!EXP_TRANS) {
                    T[j] = (A[j] * T0[j] +
                            B[j] * (T[j + 1] - T[j - 1]) +
                            C[j] * T[j + 1] +
                            D[j] * T[j - 1] +
                            E[j] * (0. - ice[j])) / (A[j] + C[j] + D[j]);
                }
                else {
                    T[j] = (A[j] * T0[j] +
                            B[j] * (T[j + 1] - T[j - 1]) +
                            C[j] * (T[j + 1] + T[j - 1]) -
                            D[j] * (T[j + 1] - T[j - 1]) +
                            E[j] * (0. - ice[j])) / (A[j] + 2. * C[j]);
                }
            }
            else {
                // frozen node: solve the implicit equation by root finding
                T[j] = root_brent(T0[j] - (param.SOIL_DT),
                                  T0[j] + (param.SOIL_DT),
                                  soil_thermal_eqn, T[j + 1],
                                  T[j - 1], T0[j], moist[j], max_moist[j],
                                  bubble[j], expt[j], ice[j], A[j],
                                  B[j], C[j], D[j], E[j], EXP_TRANS, j);
                // root_brent signals failure with values <= -998
                if (T[j] <= -998) {
                    if (options.TFALLBACK) {
                        T[j] = T0[j];
                        Tfbflag[j] = 1;
                        Tfbcount[j]++;
                    }
                    else {
                        error_solve_T_profile(T[j], T[j + 1], T[j - 1],
                                              T0[j], moist[j], max_moist[j],
                                              bubble[j], expt[j], ice[j],
                                              gamma[j - 1], A[j], B[j],
                                              C[j], D[j], E[j]);
                        return (ERROR);
                    }
                }
            }
            diff = fabs(oldT - T[j]);
            if (diff > maxdiff) {
                maxdiff = diff;
            }
        }

        if (NOFLUX) {
            /** Solve for bottom temperature if using no flux lower
                boundary **/
            j = Nnodes - 1;
            oldT = T[j];
            if (T[j] >= 0 || !FS_ACTIVE || !options.FROZEN_SOIL) {
                if (!EXP_TRANS) {
                    T[j] = (A[j] * T0[j] +
                            B[j] * (T[j] - T[j - 1]) +
                            C[j] * T[j] +
                            D[j] * T[j - 1] +
                            E[j] * (0. - ice[j])) / (A[j] + C[j] + D[j]);
                }
                else {
                    T[j] = (A[j] * T0[j] +
                            B[j] * (T[j] - T[j - 1]) +
                            C[j] * (T[j] + T[j - 1]) -
                            D[j] * (T[j] - T[j - 1]) +
                            E[j] * (0. - ice[j])) / (A[j] + 2. * C[j]);
                }
            }
            else {
                // NOTE(review): bubble[j] is passed here while the other
                // arguments use explicit [Nnodes - 1]; j == Nnodes - 1 at
                // this point so they are equivalent — confirm intent.
                T[Nnodes - 1] = root_brent(T0[Nnodes - 1] - param.SOIL_DT,
                                           T0[Nnodes - 1] + param.SOIL_DT,
                                           soil_thermal_eqn,
                                           T[Nnodes - 1],
                                           T[Nnodes - 2], T0[Nnodes - 1],
                                           moist[Nnodes - 1],
                                           max_moist[Nnodes - 1],
                                           bubble[j], expt[Nnodes - 1],
                                           ice[Nnodes - 1],
                                           A[j], B[j], C[j], D[j], E[j],
                                           EXP_TRANS, j);
                if (T[j] <= -998) {
                    if (options.TFALLBACK) {
                        T[j] = T0[j];
                        Tfbflag[j] = 1;
                        Tfbcount[j]++;
                    }
                    else {
                        error_solve_T_profile(T[Nnodes - 1], T[Nnodes - 1],
                                              T[Nnodes - 2],
                                              T0[Nnodes - 1],
                                              moist[Nnodes - 1],
                                              max_moist[Nnodes - 1],
                                              bubble[Nnodes - 1],
                                              expt[Nnodes - 1],
                                              ice[Nnodes - 1],
                                              gamma[Nnodes - 2],
                                              A[j], B[j], C[j], D[j], E[j]);
                        return (ERROR);
                    }
                }
            }
            diff = fabs(oldT - T[Nnodes - 1]);
            if (diff > maxdiff) {
                maxdiff = diff;
            }
        }

        if (maxdiff <= threshold) {
            Done = true;
        }
    }

    if (options.TFALLBACK) {
        // HACK to prevent runaway cold nose
        // Handle the case in which the a node was colder than both the nodes
        // above and below in the last time step, and that both differences
        // have increased between the last time step and the current one.
        for (j = 1; j < Nnodes - 1; j++) {
            if ((Tlast[j - 1] - Tlast[j] > 0 &&
                 Tlast[j + 1] - Tlast[j] > 0 &&
                 (T[j - 1] - T[j]) - (Tlast[j - 1] - Tlast[j]) > 0 &&
                 (T[j + 1] - T[j]) - (Tlast[j + 1] - Tlast[j]) > 0) ||
                (Tlast[j - 1] - Tlast[j] < 0 &&
                 Tlast[j + 1] - Tlast[j] < 0 &&
                 (T[j - 1] - T[j]) - (Tlast[j - 1] - Tlast[j]) < 0 &&
                 (T[j + 1] - T[j]) - (Tlast[j + 1] - Tlast[j]) < 0)) {
                // crude fix for now; just average the T's without taking
                // distance, conductivities into account
                T[j] = 0.5 * (T[j - 1] + T[j + 1]);
                Tfbflag[j] = 1;
                Tfbcount[j]++;
            }
        }
    }

    if (!Done && !Error) {
        // iteration cap reached without convergence
        if (options.TFALLBACK) {
            for (j = 0; j < Nnodes; j++) {
                T[j] = T0[j];
                Tfbflag[j] = 1;
                Tfbcount[j]++;
            }
        }
        else {
            fprintf(LOG_DEST,
                    "ERROR: Temperature Profile Unable to Converge!!!\n");
            fprintf(LOG_DEST, "Dumping Profile Temperatures (last, new).\n");
            for (j = 0; j < Nnodes; j++) {
                fprintf(LOG_DEST, "%f\t%f\n", T0[j], T[j]);
            }
            log_err("Cannot solve temperature profile:\n"
                    "\tToo Many Iterations in solve_T_profile");
            return (ERROR);
        }
    }

    return (Error);
}

/******************************************************************************
 * @brief    Dummy function to allow calling of error_print_solve_T_profile()
 *****************************************************************************/
double
error_solve_T_profile(double Tj,
                      ...)
{
    va_list ap;
    double  error;

    va_start(ap, Tj);
    error = error_print_solve_T_profile(Tj, ap);
    va_end(ap);

    return error;
}

/******************************************************************************
 * @brief    Print soil temperature terms.
 *****************************************************************************/
double
error_print_solve_T_profile(double  T,
                            va_list ap)
{
    double TL;
    double TU;
    double T0;
    double moist;
    double max_moist;
    double bubble;
    double expt;
    double ice0;
    double gamma;
    double A;
    double B;
    double C;
    double D;
    double E;

    // pull the argument list in the exact order error_solve_T_profile()
    // was called with
    TL = (double) va_arg(ap, double);
    TU = (double) va_arg(ap, double);
    T0 = (double) va_arg(ap, double);
    moist = (double) va_arg(ap, double);
    max_moist = (double) va_arg(ap, double);
    bubble = (double) va_arg(ap, double);
    expt = (double) va_arg(ap, double);
    ice0 = (double) va_arg(ap, double);
    gamma = (double) va_arg(ap, double);
    A = (double) va_arg(ap, double);
    B = (double) va_arg(ap, double);
    C = (double) va_arg(ap, double);
    D = (double) va_arg(ap, double);
    E = (double) va_arg(ap, double);

    log_warn("solve_T_profile failed to converge to a solution "
             "in root_brent.  Variable values will be dumped to the "
             "screen, check for invalid values.");

    fprintf(LOG_DEST, "T\t%f\n", T);
    fprintf(LOG_DEST, "TL\t%f\n", TL);
    fprintf(LOG_DEST, "TU\t%f\n", TU);
    fprintf(LOG_DEST, "T0\t%f\n", T0);
    fprintf(LOG_DEST, "moist\t%f\n", moist);
    fprintf(LOG_DEST, "max_moist\t%f\n", max_moist);
    fprintf(LOG_DEST, "bubble\t%f\n", bubble);
    fprintf(LOG_DEST, "expt\t%f\n", expt);
    fprintf(LOG_DEST, "ice0\t%f\n", ice0);
    fprintf(LOG_DEST, "gamma\t%f\n", gamma);
    fprintf(LOG_DEST, "A\t%f\n", A);
    fprintf(LOG_DEST, "B\t%f\n", B);
    fprintf(LOG_DEST, "C\t%f\n", C);
    fprintf(LOG_DEST, "D\t%f\n", D);
    fprintf(LOG_DEST, "E\t%f\n", E);

    log_warn("Finished dumping values for solve_T_profile.\n"
             "Try increasing SOIL_DT to get model to complete cell.\n"
             "Then check output for instabilities.");

    return(ERROR);
}

/******************************************************************************
 * @brief    Heat Equation for implicit scheme (used to calculate residual of
 *           the heat equation) passed from solve_T_profile_implicit
 *
 * Called two ways:
 *   init == 1 : cache all model/soil parameters from the vararg list into
 *               threadprivate statics and seed T_2 with the interior of T0;
 *   init == 0 : the first vararg is `focus` — compute the heat-equation
 *               residuals res[] either for all n unknowns (focus == -1) or
 *               only for nodes focus-1 .. focus+1 (used by newt_raph when
 *               perturbing a single unknown).
 *****************************************************************************/
void
fda_heat_eqn(double T_2[],
             double res[],
             int    n,
             int    init,
             ...)
{
    char   PAST_BOTTOM;
    double storage_term, flux_term, phase_term, flux_term1, flux_term2;
    double Lsum;
    int    i;
    size_t lidx;
    int    focus, left, right;

    // argument list handling
    va_list arg_addr;

    // TODO: remove use of static variables (see GH #735), for now:
    // make static variables thread safe
    static double  deltat;
    static int     NOFLUX;
    static int     EXP_TRANS;
    static double *T0;
    static double *moist;
    static double *ice;
    static double *kappa;
    static double *Cs;
    static double *max_moist;
    static double *bubble;
    static double *expt;
    static double *alpha;
    static double *beta;
    static double *gamma;
    static double *Zsum;
    static double  Dp;
    static double *bulk_dens_min;
    static double *soil_dens_min;
    static double *quartz;
    static double *bulk_density;
    static double *soil_density;
    static double *organic;
    static double *depth;
    static size_t  Nlayers;

    // variables used to calculate residual of the heat equation
    // defined here
    static double Ts;
    static double Tb;

    // locally used variables
    static double ice_new[MAX_NODES], Cs_new[MAX_NODES], kappa_new[MAX_NODES];
    static double DT[MAX_NODES], DT_down[MAX_NODES], DT_up[MAX_NODES];
    static double Dkappa[MAX_NODES];
    static double Bexp;
    #pragma omp threadprivate(deltat, NOFLUX, EXP_TRANS, T0, moist, ice, \
    kappa, Cs, max_moist, bubble, expt, alpha, beta, gamma, Zsum, Dp, \
    bulk_dens_min, soil_dens_min, quartz, bulk_density, soil_density, organic, \
    depth, Nlayers, Ts, Tb, ice_new, Cs_new, kappa_new, DT, DT_down, DT_up, \
    Dkappa, Bexp)

    // initialize variables if init==1
    if (init == 1) {
        va_start(arg_addr, init);
        deltat = va_arg(arg_addr, double);
        NOFLUX = va_arg(arg_addr, int);
        EXP_TRANS = va_arg(arg_addr, int);
        T0 = va_arg(arg_addr, double *);
        moist = va_arg(arg_addr, double *);
        ice = va_arg(arg_addr, double *);
        kappa = va_arg(arg_addr, double *);
        Cs = va_arg(arg_addr, double *);
        max_moist = va_arg(arg_addr, double *);
        bubble = va_arg(arg_addr, double *);
        expt = va_arg(arg_addr, double *);
        alpha = va_arg(arg_addr, double *);
        beta = va_arg(arg_addr, double *);
        gamma = va_arg(arg_addr, double *);
        Zsum = va_arg(arg_addr, double *);
        Dp = va_arg(arg_addr, double);
        bulk_dens_min = va_arg(arg_addr, double *);
        soil_dens_min = va_arg(arg_addr, double *);
        quartz = va_arg(arg_addr, double *);
        bulk_density = va_arg(arg_addr, double *);
        soil_density = va_arg(arg_addr, double *);
        organic = va_arg(arg_addr, double *);
        depth = va_arg(arg_addr, double *);
        Nlayers = va_arg(arg_addr, size_t);

        if (EXP_TRANS) {
            // NOTE(review): logf() (float precision) on a double argument;
            // see matching note in solve_T_profile — confirm before changing.
            if (!NOFLUX) {
                Bexp = logf(Dp + 1.) / (double)(n + 1);
            }
            else {
                Bexp = logf(Dp + 1.) / (double)(n);
            }
        }
        // fixed boundary temperatures for this solve
        Ts = T0[0];
        if (!NOFLUX) {
            Tb = T0[n + 1];
        }
        else {
            Tb = T0[n];
        }
        for (i = 0; i < n; i++) {
            T_2[i] = T0[i + 1];
        }
    }
    // calculate residuals if init==0
    else {
        // get the range of columns to calculate
        va_start(arg_addr, init);
        focus = va_arg(arg_addr, int);

        // calculate all entries if focus == -1
        if (focus == -1) {
            lidx = 0;
            Lsum = 0.;
            PAST_BOTTOM = false;
            for (i = 0; i < n + 1; i++) {
                kappa_new[i] = kappa[i];
                if (i >= 1) {
                    // all but surface node
                    // update ice contents
                    if (T_2[i - 1] < 0) {
                        ice_new[i] = moist[i] -
                                     maximum_unfrozen_water(T_2[i - 1],
                                                            max_moist[i],
                                                            bubble[i],
                                                            expt[i]);
                        if (ice_new[i] < 0) {
                            ice_new[i] = 0;
                        }
                    }
                    else {
                        ice_new[i] = 0;
                    }
                    Cs_new[i] = Cs[i];
                    // update other states due to ice content change
                    /***********************************************/
                    if (ice_new[i] != ice[i]) {
                        kappa_new[i] = soil_conductivity(moist[i],
                                                         moist[i] -
                                                         ice_new[i],
                                                         soil_dens_min[lidx],
                                                         bulk_dens_min[lidx],
                                                         quartz[lidx],
                                                         soil_density[lidx],
                                                         bulk_density[lidx],
                                                         organic[lidx]);
                        Cs_new[i] = volumetric_heat_capacity(
                            bulk_density[lidx] / soil_density[lidx],
                            moist[i] - ice_new[i], ice_new[i],
                            organic[lidx]);
                    }
                    /************************************************/
                }
                // advance the soil-layer index as node depth passes each
                // layer bottom; clamp to the last layer once past it
                if (Zsum[i] > Lsum + depth[lidx] && !PAST_BOTTOM) {
                    Lsum += depth[lidx];
                    lidx++;
                    if (lidx == Nlayers) {
                        PAST_BOTTOM = true;
                        lidx = Nlayers - 1;
                    }
                }
            }
            // constants used in fda equation
            for (i = 0; i < n; i++) {
                if (i == 0) {
                    DT[i] = T_2[i + 1] - Ts;
                    DT_up[i] = T_2[i] - Ts;
                    DT_down[i] = T_2[i + 1] - T_2[i];
                }
                else if (i == n - 1) {
                    DT[i] = Tb - T_2[i - 1];
                    DT_up[i] = T_2[i] - T_2[i - 1];
                    DT_down[i] = Tb - T_2[i];
                }
                else {
                    DT[i] = T_2[i + 1] - T_2[i - 1];
                    DT_up[i] = T_2[i] - T_2[i - 1];
                    DT_down[i] = T_2[i + 1] - T_2[i];
                }
                if (i < n - 1) {
                    Dkappa[i] = kappa_new[i + 2] - kappa_new[i];
                }
                else if (!NOFLUX) {
                    Dkappa[i] = kappa_new[i + 2] - kappa_new[i];
                }
                else {
                    Dkappa[i] = kappa_new[i + 1] - kappa_new[i];
                }
            }
            for (i = 0; i < n; i++) {
                storage_term = Cs_new[i + 1] * (T_2[i] - T0[i + 1]) /
                               deltat +
                               T_2[i] * (Cs_new[i + 1] - Cs[i + 1]) / deltat;
                if (!EXP_TRANS) {
                    flux_term1 = Dkappa[i] / alpha[i] * DT[i] / alpha[i];
                    flux_term2 = kappa_new[i + 1] *
                                 (DT_down[i] / gamma[i] -
                                  DT_up[i] / beta[i]) / (0.5 * alpha[i]);
                }
                else {
                    // grid transformation
                    flux_term1 = Dkappa[i] / 2. * DT[i] / 2. /
                                 (Bexp * (Zsum[i + 1] + 1.)) /
                                 (Bexp * (Zsum[i + 1] + 1.));
                    flux_term2 = kappa_new[i + 1] *
                                 ((DT_down[i] - DT_up[i]) /
                                  (Bexp * (Zsum[i + 1] + 1.)) /
                                  (Bexp * (Zsum[i + 1] + 1.)) -
                                  DT[i] / 2. /
                                  (Bexp * (Zsum[i + 1] + 1.) *
                                   (Zsum[i + 1] + 1.)));
                }
                // inelegant fix for "cold nose" problem - when a very cold
                // node skates off to much colder and breaks the second law
                // of thermodynamics (because flux_term1 exceeds flux_term2
                // in absolute magnitude) - therefore, don't let that node
                // get any colder.  This only seems to happen in the first
                // and second near-surface nodes.
                flux_term = flux_term1 + flux_term2;
                phase_term = CONST_RHOICE * CONST_LATICE *
                             (ice_new[i + 1] - ice[i + 1]) / deltat;
                res[i] = flux_term + phase_term - storage_term;
            }
        }
        // only calculate entries focus-1, focus, and focus+1 if focus has a
        // value>=0
        else {
            if (focus == 0) {
                left = 0;
            }
            else {
                left = focus - 1;
            }
            if (focus == n - 1) {
                right = n - 1;
            }
            else {
                right = focus + 1;
            }
            // update ice content for node focus and its adjacents
            for (i = left; i <= right; i++) {
                if (T_2[i] < 0) {
                    ice_new[i + 1] = moist[i + 1] -
                                     maximum_unfrozen_water(T_2[i],
                                                            max_moist[i + 1],
                                                            bubble[i + 1],
                                                            expt[i + 1]);
                    if (ice_new[i + 1] < 0) {
                        ice_new[i + 1] = 0;
                    }
                }
                else {
                    ice_new[i + 1] = 0;
                }
            }
            // update other parameters due to ice content change
            /********************************************************/
            lidx = 0;
            Lsum = 0.;
            PAST_BOTTOM = false;
            for (i = 0; i <= right + 1; i++) {
                if (i >= left + 1) {
                    if (ice_new[i] != ice[i]) {
                        kappa_new[i] = soil_conductivity(moist[i],
                                                         moist[i] -
                                                         ice_new[i],
                                                         soil_dens_min[lidx],
                                                         bulk_dens_min[lidx],
                                                         quartz[lidx],
                                                         soil_density[lidx],
                                                         bulk_density[lidx],
                                                         organic[lidx]);
                        Cs_new[i] = volumetric_heat_capacity(
                            bulk_density[lidx] / soil_density[lidx],
                            moist[i] - ice_new[i], ice_new[i],
                            organic[lidx]);
                    }
                }
                if (Zsum[i] > Lsum + depth[lidx] && !PAST_BOTTOM) {
                    Lsum += depth[lidx];
                    lidx++;
                    if (lidx == Nlayers) {
                        PAST_BOTTOM = true;
                        lidx = Nlayers - 1;
                    }
                }
            }
            /*********************************************************/
            // update other states due to ice content change
            for (i = left; i <= right; i++) {
                if (i == 0) {
                    DT[i] = T_2[i + 1] - Ts;
                    DT_up[i] = T_2[i] - Ts;
                    DT_down[i] = T_2[i + 1] - T_2[i];
                }
                else if (i == n - 1) {
                    DT[i] = Tb - T_2[i - 1];
                    DT_up[i] = T_2[i] - T_2[i - 1];
                    DT_down[i] = Tb - T_2[i];
                }
                else {
                    DT[i] = T_2[i + 1] - T_2[i - 1];
                    DT_up[i] = T_2[i] - T_2[i - 1];
                    DT_down[i] = T_2[i + 1] - T_2[i];
                }
                // update Dkappa due to ice content change
                /*******************************************/
                if (i < n - 1) {
                    Dkappa[i] = kappa_new[i + 2] - kappa_new[i];
                }
                else if (!NOFLUX) {
                    Dkappa[i] = kappa_new[i + 2] - kappa_new[i];
                }
                else {
                    Dkappa[i] = kappa_new[i + 1] - kappa_new[i];
                }
                /********************************************/
            }
            for (i = left; i <= right; i++) {
                storage_term = Cs_new[i + 1] * (T_2[i] - T0[i + 1]) /
                               deltat +
                               T_2[i] * (Cs_new[i + 1] - Cs[i + 1]) / deltat;
                if (!EXP_TRANS) {
                    flux_term1 = Dkappa[i] / alpha[i] * DT[i] / alpha[i];
                    flux_term2 = kappa_new[i + 1] *
                                 (DT_down[i] / gamma[i] -
                                  DT_up[i] / beta[i]) / (0.5 * alpha[i]);
                }
                else {
                    // grid transformation
                    flux_term1 = Dkappa[i] / 2. * DT[i] / 2. /
                                 (Bexp * (Zsum[i + 1] + 1.)) /
                                 (Bexp * (Zsum[i + 1] + 1.));
                    flux_term2 = kappa_new[i + 1] *
                                 ((DT_down[i] - DT_up[i]) /
                                  (Bexp * (Zsum[i + 1] + 1.)) /
                                  (Bexp * (Zsum[i + 1] + 1.)) -
                                  DT[i] / 2. /
                                  (Bexp * (Zsum[i + 1] + 1.) *
                                   (Zsum[i + 1] + 1.)));
                }
                // inelegant fix for "cold nose" problem - when a very cold
                // node skates off to much colder and breaks the second law
                // of thermodynamics (because flux_term1 exceeds flux_term2
                // in absolute magnitude) - therefore, don't let that node
                // get any colder.  This only seems to happen in the first
                // and second near-surface nodes.
                flux_term = flux_term1 + flux_term2;
                phase_term = CONST_RHOICE * CONST_LATICE *
                             (ice_new[i + 1] - ice[i + 1]) / deltat;
                res[i] = flux_term + phase_term - storage_term;
            }
        } // end of calculation of focus node only
    } // end of non-init
}
/* ===================== yescrypt-opt.c (next concatenated file) ===================== */
/*- * Copyright 2009 Colin Percival * Copyright 2013,2014 Alexander Peslyak * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. 
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include "sha256_Y.h"
#include "sysendian.h"

#include "yescrypt-platform.c"

/* Decode a 32-bit little-endian integer from an unaligned byte buffer. */
static inline uint32_t
le32dec(const void *pp)
{
	const uint8_t *p = (uint8_t const *)pp;

	return ((uint32_t)(p[0]) + ((uint32_t)(p[1]) << 8) +
	    ((uint32_t)(p[2]) << 16) + ((uint32_t)(p[3]) << 24));
}

/* Encode a 32-bit integer into an unaligned byte buffer, little-endian. */
static inline void
le32enc(void *pp, uint32_t x)
{
	uint8_t * p = (uint8_t *)pp;

	p[0] = x & 0xff;
	p[1] = (x >> 8) & 0xff;
	p[2] = (x >> 16) & 0xff;
	p[3] = (x >> 24) & 0xff;
}

/* Copy count 64-bit words; count must be a positive multiple of 4. */
static inline void
blkcpy(uint64_t * dest, const uint64_t * src, size_t count)
{
	do {
		*dest++ = *src++;
		*dest++ = *src++;
		*dest++ = *src++;
		*dest++ = *src++;
	} while (count -= 4);
}

/* XOR count 64-bit words into dest; count must be a positive multiple of 4. */
static inline void
blkxor(uint64_t * dest, const uint64_t * src, size_t count)
{
	do {
		*dest++ ^= *src++;
		*dest++ ^= *src++;
		*dest++ ^= *src++;
		*dest++ ^= *src++;
	} while (count -= 4);
}

/* One 64-byte Salsa20 block, viewable as 16 words or 8 doublewords. */
typedef union {
	uint32_t w[16];
	uint64_t d[8];
} salsa20_blk_t;

/* Permute a block from canonical word order into the SIMD-friendly layout. */
static inline void
salsa20_simd_shuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
#define COMBINE(out, in1, in2) \
	Bout->d[out] = Bin->w[in1 * 2] | ((uint64_t)Bin->w[in2 * 2 + 1] << 32);
	COMBINE(0, 0, 2)
	COMBINE(1, 5, 7)
	COMBINE(2, 2, 4)
	COMBINE(3, 7, 1)
	COMBINE(4, 4, 6)
	COMBINE(5, 1, 3)
	COMBINE(6, 6, 0)
	COMBINE(7, 3, 5)
#undef COMBINE
}

/* Inverse of salsa20_simd_shuffle(). */
static inline void
salsa20_simd_unshuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
#define COMBINE(out, in1, in2) \
	Bout->w[out * 2] = Bin->d[in1]; \
	Bout->w[out * 2 + 1] = Bin->d[in2] >> 32;
	COMBINE(0, 0, 6)
	COMBINE(1, 5, 3)
	COMBINE(2, 2, 0)
	COMBINE(3, 7, 5)
	COMBINE(4, 4, 2)
	COMBINE(5, 1, 7)
	COMBINE(6, 6, 4)
	COMBINE(7, 3, 1)
#undef COMBINE
}

/**
 * salsa20_8(B):
 * Apply the salsa20/8 core to the provided block.
*/
static void
salsa20_8(uint64_t B[8])
{
	size_t i;
	salsa20_blk_t X;
#define x X.w

	/*
	 * B is in the "SIMD shuffled" layout (see salsa20_simd_shuffle), so
	 * unshuffle into plain salsa20 word order before the rounds.
	 */
	salsa20_simd_unshuffle((const salsa20_blk_t *)B, &X);

	/* 8 rounds = 4 double-rounds: a column round plus a row round each. */
	for (i = 0; i < 8; i += 2) {
#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
		/* Operate on columns */
		x[ 4] ^= R(x[ 0]+x[12], 7);  x[ 8] ^= R(x[ 4]+x[ 0], 9);
		x[12] ^= R(x[ 8]+x[ 4],13);  x[ 0] ^= R(x[12]+x[ 8],18);

		x[ 9] ^= R(x[ 5]+x[ 1], 7);  x[13] ^= R(x[ 9]+x[ 5], 9);
		x[ 1] ^= R(x[13]+x[ 9],13);  x[ 5] ^= R(x[ 1]+x[13],18);

		x[14] ^= R(x[10]+x[ 6], 7);  x[ 2] ^= R(x[14]+x[10], 9);
		x[ 6] ^= R(x[ 2]+x[14],13);  x[10] ^= R(x[ 6]+x[ 2],18);

		x[ 3] ^= R(x[15]+x[11], 7);  x[ 7] ^= R(x[ 3]+x[15], 9);
		x[11] ^= R(x[ 7]+x[ 3],13);  x[15] ^= R(x[11]+x[ 7],18);

		/* Operate on rows */
		x[ 1] ^= R(x[ 0]+x[ 3], 7);  x[ 2] ^= R(x[ 1]+x[ 0], 9);
		x[ 3] ^= R(x[ 2]+x[ 1],13);  x[ 0] ^= R(x[ 3]+x[ 2],18);

		x[ 6] ^= R(x[ 5]+x[ 4], 7);  x[ 7] ^= R(x[ 6]+x[ 5], 9);
		x[ 4] ^= R(x[ 7]+x[ 6],13);  x[ 5] ^= R(x[ 4]+x[ 7],18);

		x[11] ^= R(x[10]+x[ 9], 7);  x[ 8] ^= R(x[11]+x[10], 9);
		x[ 9] ^= R(x[ 8]+x[11],13);  x[10] ^= R(x[ 9]+x[ 8],18);

		x[12] ^= R(x[15]+x[14], 7);  x[13] ^= R(x[12]+x[15], 9);
		x[14] ^= R(x[13]+x[12],13);  x[15] ^= R(x[14]+x[13],18);
#undef R
	}
#undef x

	/*
	 * Feed-forward: re-shuffle the round output and add it word-by-word
	 * into B, which stays in the shuffled layout.
	 */
	{
		salsa20_blk_t Y;
		salsa20_simd_shuffle(&X, &Y);
		for (i = 0; i < 16; i += 4) {
			((salsa20_blk_t *)B)->w[i] += Y.w[i];
			((salsa20_blk_t *)B)->w[i + 1] += Y.w[i + 1];
			((salsa20_blk_t *)B)->w[i + 2] += Y.w[i + 2];
			((salsa20_blk_t *)B)->w[i + 3] += Y.w[i + 3];
		}
	}
}

/**
 * blockmix_salsa8(Bin, Bout, X, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.  The
 * temporary space X must be 64 bytes.
*/
static void
blockmix_salsa8(const uint64_t * Bin, uint64_t * Bout, uint64_t * X, size_t r)
{
	size_t i;

	/* 1: X <-- B_{2r - 1} */
	blkcpy(X, &Bin[(2 * r - 1) * 8], 8);

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < 2 * r; i += 2) {
		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		/* Even-indexed outputs land in the first half of Bout... */
		blkcpy(&Bout[i * 4], X, 8);

		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8 + 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		/* ...and odd-indexed outputs in the second half. */
		blkcpy(&Bout[i * 4 + r * 8], X, 8);
	}
}

/* These are tunable */
#define S_BITS 8
#define S_SIMD 2
#define S_P 4
#define S_ROUNDS 6

/* Number of S-boxes.  Not tunable, hard-coded in a few places. */
#define S_N 2

/* Derived values.  Not tunable on their own. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD)
#define S_P_SIZE (S_P * S_SIMD)
#define S_MIN_R ((S_P * S_SIMD + 15) / 16)

/**
 * pwxform(B):
 * Transform the provided block using the provided S-boxes.
*/
static void
block_pwxform(uint64_t * B, const uint64_t * S)
{
	/* View B as S_P parallel lanes of S_SIMD 64-bit words each. */
	uint64_t (*X)[S_SIMD] = (uint64_t (*)[S_SIMD])B;
	const uint8_t *S0 = (const uint8_t *)S;
	/* The second S-box starts right after the first. */
	const uint8_t *S1 = (const uint8_t *)(S + S_SIZE1 * S_SIMD);
	size_t i, j;
#if S_SIMD > 2
	size_t k;
#endif

	for (j = 0; j < S_P; j++) {
		uint64_t *Xj = X[j];
		/* Keep the first (up to two) lane words in locals across rounds. */
		uint64_t x0 = Xj[0];
#if S_SIMD > 1
		uint64_t x1 = Xj[1];
#endif

		for (i = 0; i < S_ROUNDS; i++) {
			/*
			 * Data-dependent S-box lookup: the masked low/high
			 * 32-bit halves of x0 select byte offsets into S0/S1.
			 */
			uint64_t x = x0 & S_MASK2;
			const uint64_t *p0, *p1;

			p0 = (const uint64_t *)(S0 + (uint32_t)x);
			p1 = (const uint64_t *)(S1 + (x >> 32));

			/* 32x32->64 multiply of x0's halves, then add/xor. */
			x0 = (uint64_t)(x0 >> 32) * (uint32_t)x0;
			x0 += p0[0];
			x0 ^= p1[0];

#if S_SIMD > 1
			x1 = (uint64_t)(x1 >> 32) * (uint32_t)x1;
			x1 += p0[1];
			x1 ^= p1[1];
#endif

#if S_SIMD > 2
			/* Remaining lane words, if any, processed in memory. */
			for (k = 2; k < S_SIMD; k++) {
				x = Xj[k];

				x = (uint64_t)(x >> 32) * (uint32_t)x;
				x += p0[k];
				x ^= p1[k];

				Xj[k] = x;
			}
#endif
		}

		Xj[0] = x0;
#if S_SIMD > 1
		Xj[1] = x1;
#endif
	}
}

/**
 * blockmix_pwxform(Bin, Bout, S, r):
 * Compute Bout = BlockMix_pwxform{salsa20/8, S, r}(Bin).  The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 *
 * S lacks const qualifier to match blockmix_salsa8()'s prototype, which we
 * need to refer to both functions via the same function pointers.
*/
static void
blockmix_pwxform(const uint64_t * Bin, uint64_t * Bout, uint64_t * S, size_t r)
{
	size_t r1, r2, i;

	/* Convert 128-byte blocks to (S_P_SIZE * 64-bit) blocks */
	r1 = r * 128 / (S_P_SIZE * 8);

	/* X <-- B_{r1 - 1} */
	blkcpy(Bout, &Bin[(r1 - 1) * S_P_SIZE], S_P_SIZE);

	/* X <-- X \xor B_i */
	blkxor(Bout, Bin, S_P_SIZE);

	/* X <-- H'(X) */
	/* B'_i <-- X */
	block_pwxform(Bout, S);

	/* for i = 0 to r1 - 1 do */
	for (i = 1; i < r1; i++) {
		/* X <-- X \xor B_i */
		blkcpy(&Bout[i * S_P_SIZE], &Bout[(i - 1) * S_P_SIZE],
		    S_P_SIZE);
		blkxor(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE], S_P_SIZE);

		/* X <-- H'(X) */
		/* B'_i <-- X */
		block_pwxform(&Bout[i * S_P_SIZE], S);
	}

	/* Handle partial blocks (when 128r is not a multiple of the
	 * pwxform block size): pass leftover input through unchanged. */
	if (i * S_P_SIZE < r * 16)
		blkcpy(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE],
		    r * 16 - i * S_P_SIZE);

	/* Index (in 64-bit words / 8) of the last full pwxform block. */
	i = (r1 - 1) * S_P_SIZE / 8;

	/* Convert 128-byte blocks to 64-byte blocks */
	r2 = r * 2;

	/* B'_i <-- H(B'_i) */
	salsa20_8(&Bout[i * 8]);
	i++;

	for (; i < r2; i++) {
		/* B'_i <-- H(B'_i \xor B'_{i-1}) */
		blkxor(&Bout[i * 8], &Bout[(i - 1) * 8], 8);
		salsa20_8(&Bout[i * 8]);
	}
}

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static inline uint64_t
integerify(const uint64_t * B, size_t r)
{
/*
 * Our 64-bit words are in host byte order, and word 6 holds the second 32-bit
 * word of B_{2r-1} due to SIMD shuffling.  The 64-bit value we return is also
 * in host byte order, as it should be.
 */
	const uint64_t * X = &B[(2 * r - 1) * 8];
	uint32_t lo = X[0];
	uint32_t hi = X[6] >> 32;
	return ((uint64_t)hi << 32) + lo;
}

/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be even and
 * no smaller than 2.
*/
static void
smix1(uint64_t * B, size_t r, uint64_t N, yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	/* With S-boxes present we use pwxform mixing, else classic salsa20/8. */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 16 * r;	/* 64-bit words per 128r-byte block */
	uint64_t * X = V;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t n, i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* Decode B from little-endian into the shuffled host-order layout,
	 * writing directly into V_0 (X aliases V here). */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	/* 4: X <-- H(X) */
	/* 3: V_i <-- X */
	blockmix(X, Y, Z, r);
	blkcpy(&V[s], Y, s);

	/* From here on, X lives in the XY scratch area. */
	X = XY;

	if (NROM && (VROM_mask & 1)) {
		/* NOTE(review): this inner test is always true here -- the
		 * enclosing condition already checked (VROM_mask & 1). */
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);

			/* X <-- H(X \xor VROM_j) */
			blkxor(Y, &VROM[j * s], s);
		}

		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			/* n tracks p2floor(i): doubled at powers of two. */
			if ((i & (i - 1)) == 0)
				n <<= 1;

			/* j <-- Wrap(Integerify(X), i) */
			j = integerify(X, r) & (n - 1);
			j += i - n;

			/* X <-- X \xor V_j */
			blkxor(X, &V[j * s], s);

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i + 1 - n;

				/* X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		yescrypt_flags_t rw = flags & YESCRYPT_RW;

		/* 4: X <-- H(X) */
		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if (rw) {
				/* n tracks p2floor(i). */
				if ((i & (i - 1)) == 0)
					n <<= 1;

				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(X, r) & (n - 1);
				j += i - n;

				/* X <-- X \xor V_j */
				blkxor(X, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			if (rw) {
				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(Y, r) & (n - 1);
				j += (i + 1) - n;

				/* X <-- X \xor V_j */
				blkxor(Y, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(Y, X, Z, r);
		}
	}

	/* B' <-- X */
	/* Re-encode the final X back into B: unshuffle + little-endian. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The value Nloop must be even.
 */
static void
smix2(uint64_t * B, size_t r, uint64_t N, uint64_t Nloop,
    yescrypt_flags_t flags, uint64_t * V, uint64_t NROM,
    const yescrypt_shared_t * shared, uint64_t * XY, uint64_t * S)
{
	/* Same mixing-function selection as in smix1(). */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1 | 1;
	size_t s = 16 * r;
	yescrypt_flags_t rw = flags & YESCRYPT_RW;
	uint64_t * X = XY;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t i, j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	/* Decode B into the shuffled host-order working layout. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* 7: j <-- Integerify(X) mod N */
				j &= N - 1;

				/* 8: X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
				/* V_j <-- Xprev \xor V_j */
				if (rw)
					blkcpy(&V[j * s], Y, s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		/* 6: for i = 0 to N - 1 do */
		/* Nloop is even, so process two iterations per pass. */
		i = Nloop / 2;
		do {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			/* 7: j <-- Integerify(X) mod N */
			j = integerify(Y, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(Y, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], Y, s);
			blockmix(Y, X, Z, r);
		} while (--i);
	}

	/* 10: B' <-- X */
	/* Re-encode the result back into B. */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
*/
static uint64_t
p2floor(uint64_t x)
{
	uint64_t y;
	/* Repeatedly clear the lowest set bit; the last nonzero value is
	 * the highest set bit, i.e. the largest power of 2 <= x. */
	while ((y = x & (x - 1)))
		x = y;
	return x;
}

/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N).  The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage
 * XY must be 256r+64 or (256r+64)*p bytes in length (the larger size is
 * required with OpenMP-enabled builds).  The value N must be a power of 2
 * greater than 1.
 */
static void
smix(uint64_t * B, size_t r, uint64_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	size_t s = 16 * r;
	uint64_t Nchunk = N / p, Nloop_all, Nloop_rw;
	uint32_t i;

	/* Derive the second-loop iteration count from t (time factor). */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	/* Portion of the second loop done with read-write access to V. */
	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint64_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	/* Each of the p lanes fills (and partially revisits) its own chunk
	 * of V, using its own slice of B (and of S/XY when present). */
	for (i = 0; i < p; i++) {
		uint64_t Vchunk = i * Nchunk;
		uint64_t * Bp = &B[i * s];
		uint64_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
		uint64_t * XYp = XY;
#endif
		/* The last lane absorbs any rounding remainder of N. */
		uint64_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
		/* Initialize this lane's S-boxes from its block, using plain
		 * (non-pwxform) smix1. */
		if (Sp)
			smix1(Bp, 1, S_SIZE_ALL / 16,
			    flags & ~YESCRYPT_PWXFORM,
			    Sp, NROM, shared, XYp, NULL);
		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);
		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
		    NROM, shared, XYp, Sp);
	}

	/* Remaining second-loop iterations read the whole of V (all lanes'
	 * chunks), so they run with YESCRYPT_RW cleared. */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint64_t * Bp = &B[i * s];
#ifdef _OPENMP
			uint64_t * XYp = &XY[i * (2 * s + 8)];
#else
			uint64_t * XYp = XY;
#endif
			uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}

/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf.  The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32.  The parameter N must be a power
 * of 2 greater than 1.
 *
 * t controls computation time while not affecting peak memory usage.  shared
 * and flags may request special modes as described in yescrypt.h.  local is
 * the thread-local data structure, allowing to preserve and reuse a memory
 * allocation across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error.
 */
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint64_t * B, * V, * XY, * S;
	uint64_t sha256[4];

	/*
	 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
	 * so don't let it have side-effects.  Without this adjustment, it'd
	 * enable the SHA-256 password pre-hashing and output post-hashing,
	 * because any deviation from classic scrypt implies those.
	 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (((N & (N - 1)) != 0) || (N <= 1) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 1)) {
		errno = EINVAL;
		return -1;
	}
#if S_MIN_R > 1
	if ((flags & YESCRYPT_PWXFORM) && (r < S_MIN_R)) {
		errno = EINVAL;
		return -1;
	}
#endif
	/* Guard the size computations below against size_t overflow. */
	if ((p > SIZE_MAX / ((size_t)256 * r + 64)) ||
#if SIZE_MAX / 256 <= UINT32_MAX
	    (r > SIZE_MAX / 256) ||
#endif
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
	if (N > UINT64_MAX / ((uint64_t)t + 1)) {
		errno = EFBIG;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / (S_SIZE_ALL * sizeof(*S))) {
		errno = ENOMEM;
		return -1;
	}

	/* A pre-initialized ROM, if provided, must hold a power-of-2 number
	 * of 128r-byte blocks and requires YESCRYPT_RW mode. */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 1) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V is the (persistent) local region. */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (uint64_t *)local->aligned;
		need = 0;
	}
	/* The additions below each check for overflow of `need'. */
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r + 64;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL * sizeof(*S);
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* B/XY go into a temporary region freed before returning. */
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint64_t *)tmp.aligned;
		XY = (uint64_t *)((uint8_t *)B + B_size);
	} else {
		/* Carve B, V, XY out of one reusable local allocation. */
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		B = (uint64_t *)local->aligned;
		V = (uint64_t *)((uint8_t *)B + B_size);
		XY = (uint64_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint64_t *)((uint8_t *)XY + XY_size);

	/* Any deviation from classic scrypt (t or flags nonzero) enables
	 * SHA-256 password pre-hashing. */
	if (t || flags) {
		SHA256_CTX_Y ctx;
		SHA256_Init_Y(&ctx);
		SHA256_Update_Y(&ctx, passwd, passwdlen);
		SHA256_Final_Y((uint8_t *)sha256, &ctx);
		passwd = (uint8_t *)sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256_Y(passwd, passwdlen, salt, saltlen, 1,
	    (uint8_t *)B, B_size);

	if (t || flags)
		blkcpy(sha256, B, sizeof(sha256) / sizeof(sha256[0]));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags,
			    &V[(size_t)16 * r * i * N],
			    NROM, shared,
			    &XY[((size_t)32 * r + 8) * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256_Y(passwd, passwdlen, (uint8_t *)B, B_size, 1,
	    buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client.  The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX_Y ctx;
			HMAC_SHA256_Init_Y(&ctx, buf, buflen);
			/* The HMAC message varies by r to distinguish the
			 * coin-specific yescrypt variants. */
			if (r == 32) { // yescryptR32
				HMAC_SHA256_Update_Y(&ctx, "WaviBanana", 10);
			} else if (r == 16) { // yescryptR16
				HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
			} else if (r == 8) { // yescryptR8
				HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
			} else { // yescrypt
				HMAC_SHA256_Update_Y(&ctx, salt, saltlen);
			}
			HMAC_SHA256_Final_Y((uint8_t *)sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX_Y ctx;
			SHA256_Init_Y(&ctx);
			SHA256_Update_Y(&ctx, (uint8_t *)sha256,
			    sizeof(sha256));
			SHA256_Final_Y(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
par_relax.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Relaxation scheme * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "Common.h" #include "_hypre_lapack.h" #include "../sstruct_ls/gselim.h" /*-------------------------------------------------------------------------- * hypre_BoomerAMGRelax *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int *cf_marker, HYPRE_Int relax_type, HYPRE_Int relax_points, HYPRE_Real relax_weight, HYPRE_Real omega, HYPRE_Real *l1_norms, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector 
*f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local; HYPRE_Real *Vtemp_data; if (relax_type != 10) { Vtemp_local = hypre_ParVectorLocalVector(Vtemp); Vtemp_data = hypre_VectorData(Vtemp_local); } HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data = NULL; HYPRE_Real *tmp_data; hypre_Vector *Ztemp_local; HYPRE_Real *Ztemp_data; hypre_CSRMatrix *A_CSR; HYPRE_Int *A_CSR_i; HYPRE_Int *A_CSR_j; HYPRE_Real *A_CSR_data; hypre_Vector *f_vector; HYPRE_Real *f_vector_data; HYPRE_Int i, j, jr; HYPRE_Int ii, jj; HYPRE_Int ns, ne, size, rest; HYPRE_Int column; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int num_recvs; HYPRE_Int index, start; HYPRE_Int num_procs, num_threads, my_id, ip, p; HYPRE_Int vec_start, vec_len; hypre_MPI_Status *status; hypre_MPI_Request *requests; HYPRE_Real *A_mat; HYPRE_Real *b_vec; HYPRE_Real zero = 0.0; HYPRE_Real res, res0, res2; HYPRE_Real one_minus_weight; HYPRE_Real one_minus_omega; HYPRE_Real prod; one_minus_weight = 1.0 - relax_weight; one_minus_omega = 1.0 - omega; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*----------------------------------------------------------------------- * Switch statement to direct control based on relax_type: * relax_type = 0 -> Jacobi or CF-Jacobi * relax_type = 1 -> Gauss-Seidel <--- very slow, sequential * relax_type = 2 -> Gauss_Seidel: interior points in parallel , * boundary sequential * relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (forward solve) * relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor * with outer relaxation parameters (backward solve) * relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node * relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor * with outer relaxation parameters * relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR * relax_type = 
8 -> hybrid L1 Symm. Gauss-Seidel * relax_type = 10 -> On-processor direct forward solve for matrices with * triangular structure (indices need not be ordered * triangular) * relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve * relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve * relax_type = 15 -> CG * relax_type = 16 -> Scaled Chebyshev * relax_type = 17 -> FCF-Jacobi * relax_type = 18 -> L1-Jacobi * relax_type = 9, 99, 98 -> Direct solve, Gaussian elimination * relax_type = 19-> Direct Solve, (old version) * relax_type = 29-> Direct solve: use gaussian elimination & BLAS * (with pivoting) (old version) *-----------------------------------------------------------------------*/ switch (relax_type) { case 0: /* Weighted Jacobi */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* printf("!! Proc %d: n %d, num_sends %d, num_cols_offd %d\n", my_id, n, num_sends, num_cols_offd); */ v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_points == 0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= one_minus_weight; u_data[i] += relax_weight * res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } } break; case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ if (relax_points == 0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } } break; /* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */ case 3: { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } #if defined(HYPRE_USING_PERSISTENT_COMM) // JSP: persistent comm can be similarly used for other smoothers hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (num_procs > 1) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); #if defined(HYPRE_USING_PERSISTENT_COMM) persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); Vext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); #else v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); #endif if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; i++) { v_buf_data[i-begin] = 
u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif #if defined(HYPRE_USING_PERSISTENT_COMM) hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data); #else comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, Vext_data); #endif /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if defined(HYPRE_USING_PERSISTENT_COMM) hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, Vext_data); #else hypre_ParCSRCommHandleDestroy(comm_handle); #endif comm_handle = NULL; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime(); #endif if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax 
point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } #ifndef HYPRE_USING_PERSISTENT_COMM if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime(); #endif } break; case 1: /* Gauss-Seidel VERY SLOW */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST); requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ /* for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } */ } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start; for (j=vec_start; j < vec_start+vec_len; j++) v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr,requests,status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start; hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr,requests,status); } if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) hypre_MPI_Barrier(comm); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } } break; case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST); requests= hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ /* for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } */ /*----------------------------------------------------------------- * Relax interior points first *-----------------------------------------------------------------*/ if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ((A_offd_i[i+1]-A_offd_i[i]) == zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && (A_offd_i[i+1]-A_offd_i[i]) == zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } for (p = 0; p < num_procs; p++) { jr = 0; if (p != my_id) { for (i = 0; i < num_sends; i++) { ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (ip == p) { vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1)-vec_start; for (j=vec_start; j < vec_start+vec_len; j++) v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } } hypre_MPI_Waitall(jr,requests,status); hypre_MPI_Barrier(comm); } else { if (num_procs > 1) { for (i = 0; i < num_recvs; i++) { ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i); vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1)-vec_start; hypre_MPI_Irecv(&Vext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]); } hypre_MPI_Waitall(jr,requests,status); } if (relax_points == 0) { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ((A_offd_i[i+1]-A_offd_i[i]) != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && (A_offd_i[i+1]-A_offd_i[i]) != zero && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } if (num_procs > 1) hypre_MPI_Barrier(comm); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } } break; case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */ { if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; 
for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } } break; case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/ SSOR on-processor with outer relaxation parameter */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] = res / A_diag_data[A_diag_i[i]]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,res0,res2,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / A_diag_data[A_diag_i[i]]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / A_diag_data[A_diag_i[i]];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } } break; case 7: /* Jacobi (uses ParMatvec) */ { /*----------------------------------------------------------------- * Copy f into temporary vector. *-----------------------------------------------------------------*/ hypre_SeqVectorPrefetch(hypre_ParVectorLocalVector(Vtemp), HYPRE_MEMORY_DEVICE); hypre_SeqVectorPrefetch(hypre_ParVectorLocalVector(f), HYPRE_MEMORY_DEVICE); hypre_ParVectorCopy(f, Vtemp); /*----------------------------------------------------------------- * Perform Matvec Vtemp=f-Au *-----------------------------------------------------------------*/ hypre_ParCSRMatrixMatvec(-relax_weight,A, u, relax_weight, Vtemp); #if defined(HYPRE_USING_CUDA) hypreDevice_IVAXPY(n, l1_norms, Vtemp_data, u_data); #else for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ u_data[i] += Vtemp_data[i] / l1_norms[i]; } #endif } break; case 8: /* hybrid L1 Symm. 
Gauss-Seidel */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else { res -= A_diag_data[jj] * tmp_data[ii]; } } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else { res -= A_diag_data[jj] * tmp_data[ii]; } } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else { res -= A_diag_data[jj] * tmp_data[ii]; } } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } } break; /* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */ case 10: { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } #ifdef HYPRE_USING_PERSISTENT_COMM // JSP: persistent comm can be similarly used for other smoothers hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (num_procs > 1) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); Vext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); #else v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); #endif if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = begin; i < end; i++) { v_buf_data[i - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data); #else comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); #endif /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, Vext_data); #else hypre_ParCSRCommHandleDestroy(comm_handle); #endif comm_handle = NULL; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif } // Check for ordering of matrix. If stored, get pointer, otherwise // compute ordering and point matrix variable to array. HYPRE_Int *proc_ordering; if (!hypre_ParCSRMatrixProcOrdering(A)) { proc_ordering = hypre_CTAlloc(HYPRE_Int, n, HYPRE_MEMORY_HOST); hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, n); hypre_ParCSRMatrixProcOrdering(A) = proc_ordering; } else { proc_ordering = hypre_ParCSRMatrixProcOrdering(A); } /*----------------------------------------------------------------- * Relax all points. 
*-----------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime(); #endif if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { HYPRE_Int row = proc_ordering[i]; /*----------------------------------------------------------- * If diagonal is nonzero, relax point row; otherwise, skip it. *-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[row]] != zero) { res = f_data[row]; for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[row] = res / A_diag_data[A_diag_i[row]]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { HYPRE_Int row = proc_ordering[i]; /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( A_diag_data[A_diag_i[row]] != zero) { res = f_data[row]; for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[row] = res / A_diag_data[A_diag_i[row]]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { HYPRE_Int row = proc_ordering[i]; /*----------------------------------------------------------- * If row is of the right type ( C or F ) and diagonal is * nonzero, relax point row; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[row] == relax_points && A_diag_data[A_diag_i[row]] != zero) { res = f_data[row]; for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) res -= A_diag_data[jj] * u_data[ii]; else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[row] = res / A_diag_data[A_diag_i[row]]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { HYPRE_Int row = proc_ordering[i]; /*----------------------------------------------------------- * If row is of the right type ( C or F ) and diagonal is * nonzero, relax point row; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[row] == relax_points && A_diag_data[A_diag_i[row]] != zero) { res = f_data[row]; for (jj = A_diag_i[row]+1; jj < A_diag_i[row+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[row]; jj < A_offd_i[row+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[row] = res / A_diag_data[A_diag_i[row]]; } } } } #ifndef HYPRE_USING_PERSISTENT_COMM if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime(); #endif } break; case 13: /* hybrid L1 Gauss-Seidel forward solve */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { tmp_data[i] = u_data[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else { res -= A_diag_data[jj] * tmp_data[ii]; } } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = 0; i < n; i++) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } } break; case 14: /* hybrid L1 Gauss-Seidel backward solve */ { if (num_threads > 1) { Ztemp_local = hypre_ParVectorLocalVector(Ztemp); Ztemp_data = hypre_VectorData(Ztemp_local); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) { v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax all points. *-----------------------------------------------------------------*/ if (relax_weight == 1 && omega == 1) { if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. *-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * u_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } } } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } prod = (1.0-relax_weight*omega); if (relax_points == 0) { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = n-1; i > -1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if ( l1_norms[i] != zero) { res0 = 0.0; res = f_data[i]; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } /*----------------------------------------------------------------- * Relax only C or F points as determined by relax_points. 
*-----------------------------------------------------------------*/ else { if (num_threads > 1) { tmp_data = Ztemp_data; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ne-1; i > ns-1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res0 = 0.0; res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res2 += A_diag_data[jj] * Vtemp_data[ii]; res0 -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } else { for (i = n-1; i > -1; i--) /* relax interior points */ { /*----------------------------------------------------------- * If i is of the right type ( C or F ) and diagonal is * nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (cf_marker[i] == relax_points && l1_norms[i] != zero) { res = f_data[i]; res0 = 0.0; res2 = 0.0; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res0 -= A_diag_data[jj] * u_data[ii]; res2 += A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] *= prod; u_data[i] += relax_weight*(omega*res + res0 + one_minus_omega*res2) / l1_norms[i]; /*u_data[i] += omega*(relax_weight*res + res0 + one_minus_weight*res2) / l1_norms[i];*/ } } } } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } } break; case 19: /* Direct solve: use gaussian elimination */ { HYPRE_Int n_global = (HYPRE_Int) global_num_rows; HYPRE_Int first_index = (HYPRE_Int) first_ind; /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ #ifdef HYPRE_NO_GLOBAL_PARTITION /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); #endif if (n) { #ifndef HYPRE_NO_GLOBAL_PARTITION A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); #endif A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST); b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. 
*---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++) { column = A_CSR_j[jj]; A_mat[i*n_global+column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } hypre_gselim(A_mat,b_vec,n_global,relax_error); for (i = 0; i < n; i++) { u_data[i] = b_vec[first_index+i]; } hypre_TFree(A_mat, HYPRE_MEMORY_HOST); hypre_TFree(b_vec, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #ifdef HYPRE_NO_GLOBAL_PARTITION else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #endif } break; case 98: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */ { HYPRE_Int n_global = (HYPRE_Int) global_num_rows; HYPRE_Int first_index = (HYPRE_Int) first_ind; HYPRE_Int info; HYPRE_Int one_i = 1; HYPRE_Int *piv; /*----------------------------------------------------------------- * Generate CSR matrix from ParCSRMatrix A *-----------------------------------------------------------------*/ #ifdef HYPRE_NO_GLOBAL_PARTITION /* all processors are needed for these routines */ A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); #endif if (n) { #ifndef HYPRE_NO_GLOBAL_PARTITION A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A); f_vector = hypre_ParVectorToVectorAll(f); #endif A_CSR_i = hypre_CSRMatrixI(A_CSR); A_CSR_j = hypre_CSRMatrixJ(A_CSR); A_CSR_data = hypre_CSRMatrixData(A_CSR); f_vector_data = hypre_VectorData(f_vector); A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST); b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST); /*--------------------------------------------------------------- * Load CSR matrix into A_mat. 
*---------------------------------------------------------------*/ for (i = 0; i < n_global; i++) { for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++) { /* need col major */ column = A_CSR_j[jj]; A_mat[i + n_global*column] = A_CSR_data[jj]; } b_vec[i] = f_vector_data[i]; } piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST); /* write over A with LU */ hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info); /*now b_vec = inv(A)*b_vec */ hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info); hypre_TFree(piv, HYPRE_MEMORY_HOST); for (i = 0; i < n; i++) { u_data[i] = b_vec[first_index+i]; } hypre_TFree(A_mat, HYPRE_MEMORY_HOST); hypre_TFree(b_vec, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #ifdef HYPRE_NO_GLOBAL_PARTITION else { hypre_CSRMatrixDestroy(A_CSR); A_CSR = NULL; hypre_SeqVectorDestroy(f_vector); f_vector = NULL; } #endif } break; } return (relax_error); }
1.norace1.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i][j - 1]; } // CHECK: Region is Data Race Free. // END
nvector_openmpdev.c
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP DEV implementation * of the NVECTOR module. * -----------------------------------------------------------------*/ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <nvector/nvector_openmpdev.h> #include <sundials/sundials_math.h> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private functions for special cases of vector operations */ static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */ static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */ static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */ static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */ static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */ static void 
VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */ /* Private functions for special cases of vector array operations */ static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */ static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */ static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */ static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */ static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */ static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */ static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */ /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/ N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v) { return SUNDIALS_NVEC_OPENMPDEV; } /* ---------------------------------------------------------------------------- * Function to create a new empty vector */ N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length, SUNContext sunctx) { N_Vector v; N_VectorContent_OpenMPDEV content; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(sunctx); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV; v->ops->nvclone = N_VClone_OpenMPDEV; v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV; v->ops->nvdestroy = N_VDestroy_OpenMPDEV; v->ops->nvspace = N_VSpace_OpenMPDEV; v->ops->nvgetlength = N_VGetLength_OpenMPDEV; v->ops->nvgetarraypointer = N_VGetHostArrayPointer_OpenMPDEV; v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_OpenMPDEV; v->ops->nvprint = N_VPrint_OpenMPDEV; v->ops->nvprintfile = N_VPrintFile_OpenMPDEV; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV; v->ops->nvconst = N_VConst_OpenMPDEV; v->ops->nvprod = N_VProd_OpenMPDEV; v->ops->nvdiv = N_VDiv_OpenMPDEV; v->ops->nvscale = N_VScale_OpenMPDEV; v->ops->nvabs = N_VAbs_OpenMPDEV; v->ops->nvinv = N_VInv_OpenMPDEV; v->ops->nvaddconst = N_VAddConst_OpenMPDEV; v->ops->nvdotprod = N_VDotProd_OpenMPDEV; v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV; v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV; v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV; v->ops->nvmin = N_VMin_OpenMPDEV; v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV; v->ops->nvl1norm = N_VL1Norm_OpenMPDEV; v->ops->nvcompare = N_VCompare_OpenMPDEV; v->ops->nvinvtest = N_VInvTest_OpenMPDEV; v->ops->nvconstrmask = N_VConstrMask_OpenMPDEV; v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV; v->ops->nvmaxnormlocal = 
N_VMaxNorm_OpenMPDEV; v->ops->nvminlocal = N_VMin_OpenMPDEV; v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV; v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV; v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV; v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV; v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV; /* single buffer reduction operations */ v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMPDEV; /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = length; content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Function to create a new vector */ N_Vector N_VNew_OpenMPDEV(sunindextype length) { N_Vector v; realtype *data; realtype *dev_data; int dev; v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Update ownership */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if (data == NULL) { N_VDestroy(v); return(NULL); } /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if (dev_data == NULL) { N_VDestroy(v); return(NULL); } /* Attach data */ NV_DATA_HOST_OMPDEV(v) = data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a vector with user data component */ N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata, realtype *d_vdata) { N_Vector v; int dev, host; if (h_vdata == NULL || d_vdata == NULL) return(NULL); v = NULL; v = N_VNewEmpty_OpenMPDEV(length); if (v == NULL) 
return(NULL); if (length > 0) { /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Attach data */ NV_OWN_DATA_OMPDEV(v) = SUNFALSE; NV_DATA_HOST_OMPDEV(v) = h_vdata; NV_DATA_DEV_OMPDEV(v) = d_vdata; } return(v); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors. */ N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w) { return(N_VCloneVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. */ N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w) { return(N_VCloneEmptyVectorArray(count, w)); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMPDEV */ void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count) { N_VDestroyVectorArray(vs, count); return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ sunindextype N_VGetLength_OpenMPDEV(N_Vector v) { return NV_LENGTH_OMPDEV(v); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the host. */ realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_HOST_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to return a pointer to the data array on the device. 
*/ realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v) { return((realtype *) NV_DATA_DEV_OMPDEV(v)); } /* ---------------------------------------------------------------------------- * Function to print a vector to stdout */ void N_VPrint_OpenMPDEV(N_Vector x) { N_VPrintFile_OpenMPDEV(x, stdout); } /* ---------------------------------------------------------------------------- * Function to print a vector to outfile */ void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile) { sunindextype i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMPDEV(x); xd = NV_DATA_HOST_OMPDEV(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%11.8Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%11.8g\n", xd[i]); #else fprintf(outfile, "%11.8g\n", xd[i]); #endif } fprintf(outfile, "\n"); return; } /* ---------------------------------------------------------------------------- * Function to copy host array into device array */ void N_VCopyToDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Copy array from host to device */ omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * length, 0, 0, dev, host); return; } /* ---------------------------------------------------------------------------- * Function to copy device array into host array */ void N_VCopyFromDevice_OpenMPDEV(N_Vector x) { int dev, host; sunindextype length; realtype *host_ptr; realtype *dev_ptr; /* Get array information */ length = NV_LENGTH_OMPDEV(x); host_ptr = NV_DATA_HOST_OMPDEV(x); dev_ptr = NV_DATA_DEV_OMPDEV(x); /* Get device and host identifiers */ dev = omp_get_default_device(); host = omp_get_initial_device(); /* Copy array from device to host */ 
omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * length, 0, 0, host, dev); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Create new vector from existing vector without attaching data */ N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w) { N_Vector v; N_VectorContent_OpenMPDEV content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(w->sunctx); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } /* Create content */ content = NULL; content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content); if (content == NULL) { N_VDestroy(v); return(NULL); } /* Attach content */ v->content = content; /* Initialize content */ content->length = NV_LENGTH_OMPDEV(w); content->own_data = SUNFALSE; content->host_data = NULL; content->dev_data = NULL; return(v); } /* ---------------------------------------------------------------------------- * Create new vector from existing vector and attach data */ N_Vector N_VClone_OpenMPDEV(N_Vector w) { N_Vector v; realtype *data; realtype *dev_data; sunindextype length; int dev; v = NULL; v = N_VCloneEmpty_OpenMPDEV(w); if (v == NULL) return(NULL); length = NV_LENGTH_OMPDEV(w); /* Create data */ if (length > 0) { /* Update ownership flag */ NV_OWN_DATA_OMPDEV(v) = SUNTRUE; /* Allocate memory on host */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if (data == NULL) { N_VDestroy(v); return(NULL); } /* Allocate memory on device */ dev = omp_get_default_device(); dev_data = omp_target_alloc(length * sizeof(realtype), dev); if (dev_data == NULL) { N_VDestroy(v); return(NULL); } /* Attach data */ NV_DATA_HOST_OMPDEV(v)= data; NV_DATA_DEV_OMPDEV(v) = dev_data; } return(v); } /* 
 ----------------------------------------------------------------------------
 * Destroy vector and free vector memory
 */

void N_VDestroy_OpenMPDEV(N_Vector v)
{
  int dev;

  if (v == NULL) return;

  /* free content */
  if (v->content != NULL) {
    /* free data arrays only if they are owned by the vector (see
       N_VMake_OpenMPDEV, which wraps user-owned buffers) */
    if (NV_OWN_DATA_OMPDEV(v)) {
      if (NV_DATA_HOST_OMPDEV(v) != NULL) {
        free(NV_DATA_HOST_OMPDEV(v));
        NV_DATA_HOST_OMPDEV(v) = NULL;
      }
      if (NV_DATA_DEV_OMPDEV(v) != NULL) {
        dev = omp_get_default_device();
        omp_target_free(NV_DATA_DEV_OMPDEV(v), dev);
        NV_DATA_DEV_OMPDEV(v) = NULL;
      }
    }
    free(v->content);
    v->content = NULL;
  }

  /* free ops and vector */
  if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
  free(v); v = NULL;

  return;
}

/* ----------------------------------------------------------------------------
 * Get storage requirement for N_Vector (realtype words / integer words)
 */

void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  *lrw = NV_LENGTH_OMPDEV(v);
  *liw = 1;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute linear combination z[i] = a*x[i]+b*y[i].
 * Dispatches to specialized kernels for the common coefficient patterns
 * before falling through to the general device loop; the order of the
 * tests matters (aliasing cases must be caught first).
 */

void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype c, *xd_dev, *yd_dev, *zd_dev;
  N_Vector v1, v2;
  booleantype test;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMPDEV(a,x,y);
    return;
  }

  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMPDEV(b,y,x);
    return;
  }

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMPDEV(x, y, z);
    return;
  }

  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMPDEV(v2, v1, z);
    return;
  }

  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Assigns constant value to all vector elements, z[i] = c
 */

void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
  sunindextype i, N;
  realtype *zd_dev;
  int dev;

  zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++) zd_dev[i] = c;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise product z[i]
= x[i]*y[i]
 */

void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]*yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise division z[i] = x[i]/y[i]
 * (no guard against y[i] == 0; caller must ensure denominators are nonzero)
 */

void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]/yd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute scaler multiplication z[i] = c*x[i], with special-case kernels for
 * in-place scaling and c == +/-1.
 */

void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  if (z == x) {  /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMPDEV(c, x);
    return;
  }

  if (c == ONE) {
    VCopy_OpenMPDEV(x, z);
  } else if (c == -ONE) {
    VNeg_OpenMPDEV(x, z);
  } else {
    N      = NV_LENGTH_OMPDEV(x);
    xd_dev = NV_DATA_DEV_OMPDEV(x);
    zd_dev = NV_DATA_DEV_OMPDEV(z);

    /* get default device identifier */
    dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      zd_dev[i] = c*xd_dev[i];
  }

  return;
}

/* ----------------------------------------------------------------------------
 * Compute absolute value of vector components z[i] = SUNRabs(x[i])
 */

void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = SUNRabs(xd_dev[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = 1 / x[i]
 * (no zero check; see N_VInvTest_OpenMPDEV for the checked variant)
 */

void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = ONE/xd_dev[i];

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
 */

void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]+b;

  return;
}

/* ----------------------------------------------------------------------------
 * Computes the dot product of two
vectors, a = sum(x[i]*y[i])
 */

realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *yd_dev;
  int dev;

  xd_dev = yd_dev = NULL;
  sum = ZERO;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* sum is mapped tofrom so the device-side reduction result is returned */
#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += xd_dev[i]*yd_dev[i];
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Computes max norm of a vector, max_i |x[i]|
 */

realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype max, *xd_dev;
  int dev;

  max = ZERO;
  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(tofrom:max) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
  for (i = 0; i < N; i++) {
    max = SUNMAX(SUNRabs(xd_dev[i]), max);
  }

  return(max);
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a vector
 */

realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  return(SUNRsqrt(N_VWSqrSumLocal_OpenMPDEV(x, w)/(NV_LENGTH_OMPDEV(x))));
}

/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a masked vector
 */

realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id) / (NV_LENGTH_OMPDEV(x))));
}

/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a vector, sum((x[i]*w[i])^2)
 */

realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;

  sum = ZERO;
  xd_dev = wd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a masked vector
 * (elements with id[i] <= 0 are skipped)
 */

realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev, *idd_dev;
  int dev;

  sum = ZERO;
  xd_dev = wd_dev = idd_dev = NULL;

  N       = NV_LENGTH_OMPDEV(x);
  xd_dev  = NV_DATA_DEV_OMPDEV(x);
  wd_dev  = NV_DATA_DEV_OMPDEV(w);
  idd_dev = NV_DATA_DEV_OMPDEV(id);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (idd_dev[i] > ZERO) {
      sum += SUNSQR(xd_dev[i]*wd_dev[i]);
    }
  }

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Finds the minimun component of a vector. A single team seeds min with
 * x[0] on the device, then reduces over the remaining entries.
 */

realtype N_VMin_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd_dev;
  int dev;

  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
  {
    min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
    for (i = 1; i < N; i++) {
      min = SUNMIN(xd_dev[i], min);
    }
  }

  return(min);
}

/* ----------------------------------------------------------------------------
 * Computes weighted L2 norm of a vector, sqrt(sum((x[i]*w[i])^2))
 */

realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;

  sum = ZERO;
  xd_dev = wd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }

  return(SUNRsqrt(sum));
}

/* ----------------------------------------------------------------------------
 * Computes L1 norm of a vector, sum(|x[i]|)
 */

realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype sum, *xd_dev;
  int dev;

  sum = ZERO;
  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target map(tofrom:sum) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i<N; i++)
    sum += SUNRabs(xd_dev[i]);

  return(sum);
}

/* ----------------------------------------------------------------------------
 * Compare vector component values to a scaler: z[i] = (|x[i]| >= c)
 */

void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (SUNRabs(xd_dev[i]) >= c) ? ONE : ZERO;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO.
 * Returns SUNFALSE if any component was zero (those z entries are left
 * unwritten); the max reduction on val flags the failure across threads.
 */

booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev, val;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  val = ZERO;

#pragma omp target map(tofrom:val) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:val) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (xd_dev[i] == ZERO)
      val = ONE;
    else
      zd_dev[i] = ONE/xd_dev[i];
  }

  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}

/* ----------------------------------------------------------------------------
 * Compute constraint mask of a vector: m[i] = 1 where the constraint coded
 * in c[i] (+/-2 strict, +/-1 non-strict sign constraint) is violated by
 * x[i]; returns SUNTRUE iff all constraints hold.
 */

booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd_dev, *xd_dev, *md_dev;
  int dev;

  cd_dev = xd_dev = md_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  cd_dev = NV_DATA_DEV_OMPDEV(c);
  md_dev = NV_DATA_DEV_OMPDEV(m);

  /* get default device identifier */
  dev = omp_get_default_device();

  temp = ONE;

#pragma omp target map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
  for (i = 0; i < N; i++) {
    md_dev[i] = ZERO;
    if (cd_dev[i] == ZERO) continue;  /* no constraint on this component */
    if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) {
      /* |c[i]| == 2: strict inequality required */
      if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; }
      continue;
    }
    if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) {
      /* |c[i]| == 1: non-strict inequality required */
      if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; }
    }
  }

  if (temp == ONE) return (SUNTRUE);
  else return(SUNFALSE);
}

/* ----------------------------------------------------------------------------
 * Compute minimum componentwise quotient, min(num[i]/denom[i]) skipping
 * zero denominators; BIG_REAL if every denominator is zero.
 */

realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
  sunindextype i, N;
  realtype *nd_dev, *dd_dev, min;
  int dev;

  nd_dev = dd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(num);
  nd_dev = NV_DATA_DEV_OMPDEV(num);
  dd_dev = NV_DATA_DEV_OMPDEV(denom);

  /* get default device identifier */
  dev = omp_get_default_device();

  min = BIG_REAL;

#pragma omp target map(tofrom:min) is_device_ptr(nd_dev, dd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:min) schedule(static, 1)
  for (i = 0; i < N; i++)
    if (dd_dev[i] != ZERO) min = SUNMIN(nd_dev[i]/dd_dev[i], min);

  return(min);
}

/*
 * -----------------------------------------------------------------
 * fused vector operations
 * -----------------------------------------------------------------
 */

/* Linear combination z = sum_i c[i]*X[i]; falls back to N_VScale /
   N_VLinearSum for nvec == 1 or 2. Returns 0 on success, -1 on bad nvec. */
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int i, dev;
  realtype to_add;  /* temporary variable to hold sum being added in atomic operation */
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N      = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store X dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++) {
          to_add = c[i]
* xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } /* * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1 */ if (X[0] == z) { #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) { #pragma omp teams distribute parallel for schedule(static,1) for (j=0; j<N; j++) zd_dev[j] *= c[0]; } #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,zd_dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] * xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } /* * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1 */ xd_dev = NV_DATA_DEV_OMPDEV(X[0]); #pragma omp target map(to:N,c[:nvec]) \ is_device_ptr(xd_dev, zd_dev) device(dev) { #pragma omp teams distribute parallel for schedule(static, 1) for (j=0; j<N; j++) { zd_dev[j] = c[0] * xd_dev[j]; } } #pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=1; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) { to_add = c[i] * xd_dev[j]; #pragma omp atomic zd_dev[j] += to_add; } } } free(xd_dev_ptrs); return(0); } int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VLinearSum */ if (nvec == 1) { N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to 
device */ yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); /* * Y[i][j] += a[i] * x[j] */ if (Y == Z) { #pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += a[i] * xd_dev[j]; } } free(yd_dev_ptrs); return(0); } /* Allocate and store dev pointers to copy to device */ zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); /* * Z[i][j] = Y[i][j] + a[i] * x[j] */ #pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j]; } } free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods) { int i, dev; sunindextype j, N; realtype sum; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype** yd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VDotProd */ if (nvec == 1) { dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]); return(0); } /* get vector length and data array */ N = NV_LENGTH_OMPDEV(x); xd_dev = NV_DATA_DEV_OMPDEV(x); /* get default device identifier */ dev = omp_get_default_device(); /* initialize dot products */ for (i=0; i<nvec; i++) { dotprods[i] = ZERO; } /* Allocate and store dev pointers to copy to device */ yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); /* compute multiple dot products */ #pragma omp target 
map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute for (i=0; i<nvec; i++) { yd_dev = yd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) for (j=0; j<N; j++) sum += xd_dev[j] * yd_dev[j]; dotprods[i] += sum; } free(yd_dev_ptrs); return(0); } /* * ----------------------------------------------------------------- * vector array operations * ----------------------------------------------------------------- */ int N_VLinearSumVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; N_Vector* V1; N_Vector* V2; booleantype test; realtype c; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VLinearSum */ if (nvec == 1) { N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]); return(0); } /* BLAS usage: axpy y <- ax+y */ if ((b == ONE) && (Z == Y)) return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y)); /* BLAS usage: axpy x <- by+x */ if ((a == ONE) && (Z == X)) return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X)); /* Case: a == b == 1.0 */ if ((a == ONE) && (b == ONE)) return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z)); /* Cases: */ /* (1) a == 1.0, b = -1.0, */ /* (2) a == -1.0, b == 1.0 */ if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) { V1 = test ? Y : X; V2 = test ? X : Y; return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z)); } /* Cases: */ /* (1) a == 1.0, b == other or 0.0, */ /* (2) a == other or 0.0, b == 1.0 */ /* if a or b is 0.0, then user should have called N_VScale */ if ((test = (a == ONE)) || (b == ONE)) { c = test ? b : a; V1 = test ? Y : X; V2 = test ? 
X : Y;
    return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /* (1) a == -1.0, b != 1.0,   */
  /* (2) a != 1.0, b == -1.0    */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Do all cases not handled above:                             */
  /* (1) a == other, b == 0.0 - user should have called N_VScale */
  /* (2) a == 0.0, b == other - user should have called N_VScale */
  /* (3) a,b == other, a !=b, a != -b                            */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* compute linear sum for each vector pair in vector arrays */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Scales each vector in a vector array on the device: Z[i] = c[i] * X[i].
 * Uses an in-place kernel when Z aliases X.  Returns 0 on success,
 * -1 if nvec < 1. */
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  }

  /*
   * X[i] *= c[i]   (in-place when output aliases input)
   */
  if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          xd_dev[j] *= c[i];
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i] = c[i] * X[i]
   */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c[i] * xd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Sets every element of each vector in the vector array to the constant c
 * on the device.  Returns 0 on success, -1 if nvec < 1. */
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMPDEV(c, Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get device */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0;
i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); /* set each vector in the vector array to a constant */ #pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \ is_device_ptr(zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = c; } } free(zd_dev_ptrs); return(0); } int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; realtype* xd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]); return(0); } /* get vector length */ N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(xd_dev, wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } nrm[i] = SUNRsqrt(sum/N); } } free(wd_dev_ptrs); free(xd_dev_ptrs); return(0); } int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, N_Vector id, realtype* nrm) { int i, dev; sunindextype j, N; realtype sum; realtype* wd_dev=NULL; 
realtype* xd_dev=NULL; realtype* idd_dev=NULL; realtype** wd_dev_ptrs=NULL; realtype** xd_dev_ptrs=NULL; /* invalid number of vectors */ if (nvec < 1) return(-1); /* should have called N_VWrmsNorm */ if (nvec == 1) { nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id); return(0); } /* get vector length and mask data array */ N = NV_LENGTH_OMPDEV(X[0]); idd_dev = NV_DATA_DEV_OMPDEV(id); /* get default device identifier */ dev = omp_get_default_device(); /* initialize norms */ for (i=0; i<nvec; i++) nrm[i] = ZERO; /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]); /* compute the WRMS norm for each vector in the vector array */ #pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \ is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; wd_dev = wd_dev_ptrs[i]; sum = ZERO; #pragma omp parallel for reduction(+:sum) schedule(static, 1) { for (j=0; j<N; j++) { if (idd_dev[j] > ZERO) sum += SUNSQR(xd_dev[j] * wd_dev[j]); } } nrm[i] = SUNRsqrt(sum/N); } } free(xd_dev_ptrs); free(wd_dev_ptrs); return(0); } int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a, N_Vector* X, N_Vector** Y, N_Vector** Z) { int i, j, dev; sunindextype k, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; int retval; N_Vector* YY; N_Vector* ZZ; /* invalid number of vectors */ if (nvec < 1) return(-1); if (nsum < 1) return(-1); /* --------------------------- * Special cases for nvec == 1 * --------------------------- */ if (nvec == 1) { /* should have called N_VLinearSum */ if (nsum == 1) { 
N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }

    /* should have called N_VScaleAddMulti */
    YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));

    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }

    retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);

    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device.  The Y (and Z)
     pointer tables are flattened row-major: entry [i*nsum + j] holds the
     device data pointer of Y[j][i]. */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
  }

  /*
   * Y[i][j] += a[i] * x[j]   (in-place when output aliases Y)
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        for (j=0; j<nsum; j++) {
          yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            yd_dev[k] += a[j] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      for (j=0; j<nsum; j++) {
        yd_dev = yd_dev_ptrs[i*nsum+j];
        zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Computes, for each vector index j, the linear combination
 * Z[j] = sum_{i=0..nsum-1} c[i] * X[i][j] on the device, with in-place
 * fast paths when Z aliases X[0].  Returns 0 on success, -1 if nvec < 1
 * or nsum < 1. */
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum, realtype* c,
                                              N_Vector** X, N_Vector* Z)
{
  int i; /* vector arrays index in summation [0,nsum) */
  int j; /* vector index in vector array     [0,nvec) */
  sunindextype k; /* element index in vector  [0,N)    */
  sunindextype N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  int dev;

  realtype* ctmp;
  N_Vector* Y;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination */
    Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));

    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }

    N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);

    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {

    ctmp = (realtype*) malloc(nvec * sizeof(realtype));

    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }

    N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);

    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device; xd_dev_ptrs is
     flattened so entry [j*nsum + i] holds the device pointer of X[i][j]. */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (j=0; j<nvec; j++)
    zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
  for (j=0; j<nvec; j++) {
    for (i=0; i<nsum; i++)
      xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
  }

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
   */
  if (X[0] == Z) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] *= c[0];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
   */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd_dev = xd_dev_ptrs[j*nsum];
      zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = c[0] * xd_dev[k];
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/*
 * -----------------------------------------------------------------
 * private functions
 * -----------------------------------------------------------------
 */


/* ----------------------------------------------------------------------------
 * Copy vector components into a second vector
 */

static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i];

  return;
}


/* ----------------------------------------------------------------------------
 * Compute vector sum
 */

static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]+yd_dev[i];

  return;
}


/* ----------------------------------------------------------------------------
 * Compute vector difference
 */

static void VDiff_OpenMPDEV(N_Vector x, N_Vector y,
N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = xd_dev[i]-yd_dev[i];

  return;
}


/* ----------------------------------------------------------------------------
 * Compute the negative of a vector
 */

static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;

  xd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = -xd_dev[i];

  return;
}


/* ----------------------------------------------------------------------------
 * Compute scaled vector sum:  z[i] = c*(x[i]+y[i])
 */

static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = c*(xd_dev[i]+yd_dev[i]);

  return;
}


/* ----------------------------------------------------------------------------
 * Compute scaled vector difference:  z[i] = c*(x[i]-y[i])
 */

static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = c*(xd_dev[i]-yd_dev[i]);

  return;
}


/* ----------------------------------------------------------------------------
 * Compute vector sum z[i] = a*x[i]+y[i]
 */

static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+yd_dev[i];

  return;
}


/* ----------------------------------------------------------------------------
 * Compute vector difference z[i] = a*x[i]-y[i]
 */

static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev, *zd_dev;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])-yd_dev[i];

  return;
}


/* ----------------------------------------------------------------------------
 * Compute special cases of linear sum:  y[i] += a*x[i] (in place), with
 * dedicated kernels for a == 1 and a == -1
 */

static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
  sunindextype i, N;
  realtype *xd_dev, *yd_dev;
  int dev;

  xd_dev = yd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);

  /* get default device identifier */
  dev = omp_get_default_device();

  if (a == ONE) {
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      yd_dev[i] += xd_dev[i];
    return;
  }

  if (a == -ONE) {
#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      yd_dev[i] -= xd_dev[i];
    return;
  }

#pragma omp target is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    yd_dev[i] += a*xd_dev[i];

  return;
}


/* ----------------------------------------------------------------------------
 * Compute scaled vector x[i] = a*x[i]
 */

static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
  sunindextype i, N;
  realtype *xd_dev;
  int dev;

  xd_dev = NULL;

  N      = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

#pragma omp target is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    xd_dev[i] *= a;

  return;
}


/*
 * -----------------------------------------------------------------
 * private functions for special cases of vector array operations
 * -----------------------------------------------------------------
 */

/* Z[i] = X[i] + Y[i] for each vector in the arrays (a == b == 1 case). */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store
dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] + yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Z[i] = X[i] - Y[i] for each vector in the arrays. */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] - yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Z[i] = c * (X[i] + Y[i]) for each vector in the arrays (a == b case). */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X,
                                          N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Z[i] = c * (X[i] - Y[i]) for each vector in the arrays (a == -b case). */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X,
                                           N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Z[i] = a*X[i] + Y[i] for each vector in the arrays. */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X,
                                      N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);

  return(0);
}


/* Z[i] = a*X[i] - Y[i] for each vector in the arrays. */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a,
N_Vector* X, N_Vector* Y, N_Vector* Z) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype* zd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; realtype** zd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); for (i=0; i<nvec; i++) zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]); #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; zd_dev = zd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) zd_dev[j] = (a * xd_dev[j]) - yd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); free(zd_dev_ptrs); return(0); } static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y) { int i, dev; sunindextype j, N; realtype* xd_dev=NULL; realtype* yd_dev=NULL; realtype** xd_dev_ptrs=NULL; realtype** yd_dev_ptrs=NULL; N = NV_LENGTH_OMPDEV(X[0]); /* get default device identifier */ dev = omp_get_default_device(); /* Allocate and store dev pointers to copy to device */ xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*)); for (i=0; i<nvec; i++) xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]); for (i=0; i<nvec; i++) yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]); if (a == ONE) { #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for 
(i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } if (a == -ONE) { #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] -= xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } #pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \ is_device_ptr(xd_dev,yd_dev) device(dev) #pragma omp teams distribute { for (i=0; i<nvec; i++) { xd_dev = xd_dev_ptrs[i]; yd_dev = yd_dev_ptrs[i]; #pragma omp parallel for schedule(static, 1) for (j=0; j<N; j++) yd_dev[j] += a * xd_dev[j]; } } free(xd_dev_ptrs); free(yd_dev_ptrs); return(0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV; v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV; v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMPDEV; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV; v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV; v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV; v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV; v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV; v->ops->nvscaleaddmultivectorarray = 
N_VScaleAddMultiVectorArray_OpenMPDEV;  /* completes the assignment of v->ops->nvscaleaddmultivectorarray begun on the previous line */
    v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV;

    /* enable single buffer reduction operations */
    v->ops->nvdotprodmultilocal = N_VDotProdMultiLocal_OpenMPDEV;
  } else {
    /* disable all fused vector operations */
    v->ops->nvlinearcombination = NULL;
    v->ops->nvscaleaddmulti = NULL;
    v->ops->nvdotprodmulti = NULL;
    /* disable all vector array operations */
    v->ops->nvlinearsumvectorarray = NULL;
    v->ops->nvscalevectorarray = NULL;
    v->ops->nvconstvectorarray = NULL;
    v->ops->nvwrmsnormvectorarray = NULL;
    v->ops->nvwrmsnormmaskvectorarray = NULL;
    v->ops->nvscaleaddmultivectorarray = NULL;
    v->ops->nvlinearcombinationvectorarray = NULL;
    /* disable single buffer reduction operations */
    v->ops->nvdotprodmultilocal = NULL;
  }

  /* return success */
  return(0);
}

/* Enable/disable the fused linear-combination operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearcombination = N_VLinearCombination_OpenMPDEV;
  else
    v->ops->nvlinearcombination = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the fused scale-add-multi operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMPDEV;
  else
    v->ops->nvscaleaddmulti = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the fused multi-dot-product operation on v.
 * Note: both nvdotprodmulti and nvdotprodmultilocal are pointed at the same
 * N_VDotProdMulti_OpenMPDEV routine (no separate "local" kernel here).
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf) {
    v->ops->nvdotprodmulti      = N_VDotProdMulti_OpenMPDEV;
    v->ops->nvdotprodmultilocal = N_VDotProdMulti_OpenMPDEV;
  } else {
    v->ops->nvdotprodmulti      = NULL;
    v->ops->nvdotprodmultilocal = NULL;
  }

  /* return success */
  return(0);
}

/* Enable/disable the vector-array linear-sum operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMPDEV;
  else
    v->ops->nvlinearsumvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the vector-array scale operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMPDEV;
  else
    v->ops->nvscalevectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the vector-array constant-fill operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMPDEV;
  else
    v->ops->nvconstvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the vector-array WRMS-norm operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMPDEV;
  else
    v->ops->nvwrmsnormvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the vector-array masked WRMS-norm operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMPDEV;
  else
    v->ops->nvwrmsnormmaskvectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the vector-array scale-add-multi operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMPDEV;
  else
    v->ops->nvscaleaddmultivectorarray = NULL;

  /* return success */
  return(0);
}

/* Enable/disable the vector-array linear-combination operation on v.
 * Returns -1 if v or v->ops is NULL, 0 on success. */
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* check that vector is non-NULL */
  if (v == NULL) return(-1);

  /* check that ops structure is non-NULL */
  if (v->ops == NULL) return(-1);

  /* enable/disable operation */
  if (tf)
    v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMPDEV;
  else
    v->ops->nvlinearcombinationvectorarray = NULL;

  /* return success */
  return(0);
}
4963_so4.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Generic data carrier: raw buffer plus per-dimension size metadata
   (looks like Devito-generated code; the pointer is re-cast to a VLA type
   inside Kernel using the size[] entries). */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Wall-clock accumulator for the single timed section below. */
struct profiler
{
  double section0;
};

/* Time-tiled 3-D wave-propagation stencil with masked source injection.
 *
 * Updates usol in place over time steps [time_m, time_M], using a
 * 4th-order spatial stencil in x/y/z plus a damping term, then injects
 * sources at the sparse positions listed in sp_source_mask.  Space/time
 * skewing: the x and y indices are shifted by `time` (wavefront tiling),
 * tiles of size xb_size/yb_size are swept, and each tile is cut into
 * x0_blk0_size-by-y0_blk0_size cache blocks processed in parallel.
 *
 * Returns 0 and adds elapsed seconds for the main loop nest to
 * timers->section0.
 */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  /* Re-cast the flat buffers to variably-modified array types so the loop
     body can use natural multi-dimensional indexing. */
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src)[save_src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_vec->size[1]])save_src_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  float(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (float(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  float(*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__((aligned(64))) = (float(*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]])usol_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Tile/block geometry is passed in through block_sizes:
     [0]=x tile, [1]=y tile, [2]=x cache block, [3]=y cache block. */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  int sf = 2;  /* time-skewing factor */
  int t_blk_size = 2 * sf * (time_M - time_m);

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);

  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    /* Tile loops extend past x_M/y_M because the x/y indices below are
       skewed by `time` (wavefront tiling). */
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* t0/t1/t2 rotate through the 3 time buffers of usol
           (next / current / previous). */
        for (int time = t_blk, t0 = (time + 1) % (3), t1 = (time) % (3), t2 = (time + 2) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));  /* NOTE(review): tw appears unused in this loop body */
#pragma omp parallel num_threads(nthreads)
          {
#pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
                    /* 4th-order stencil update along the innermost z line;
                       the "+ 4" offsets account for the halo padding, the
                       "- time" offsets undo the wavefront skew. */
#pragma omp simd aligned(damp, usol, vp : 64)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      float r14 = -2.5F * usol[t1][x - time + 4][y - time + 4][z + 4];
                      float r13 = 1.0 / dt;
                      float r12 = 1.0 / (dt * dt);
                      float r11 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
                      usol[t0][x - time + 4][y - time + 4][z + 4] = (r11 * (-r12 * (-2.0F * usol[t1][x - time + 4][y - time + 4][z + 4] + usol[t2][x - time + 4][y - time + 4][z + 4])) + r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 4][y - time + 4][z + 4]) + (r14 - 8.33333333e-2F * (usol[t1][x - time + 4][y - time + 4][z + 2] + usol[t1][x - time + 4][y - time + 4][z + 6]) + 1.33333333F * (usol[t1][x - time + 4][y - time + 4][z + 3] + usol[t1][x - time + 4][y - time + 4][z + 5])) / ((h_z * h_z)) + (r14 - 8.33333333e-2F * (usol[t1][x - time + 4][y - time + 2][z + 4] + usol[t1][x - time + 4][y - time + 6][z + 4]) + 1.33333333F * (usol[t1][x - time + 4][y - time + 3][z + 4] + usol[t1][x - time + 4][y - time + 5][z + 4])) / ((h_y * h_y)) + (r14 - 8.33333333e-2F * (usol[t1][x - time + 2][y - time + 4][z + 4] + usol[t1][x - time + 6][y - time + 4][z + 4]) + 1.33333333F * (usol[t1][x - time + 3][y - time + 4][z + 4] + usol[t1][x - time + 5][y - time + 4][z + 4])) / ((h_x * h_x))) / (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]);
                    }
                    /* Sparse source injection for this (x,y) column:
                       nnz_sp_source_mask gives the count of nonzero mask
                       entries, sp_source_mask their z positions.
                       NOTE(review): this loop has an indirect store through
                       zind under `omp simd` — safe only if zind values are
                       distinct within a column; confirm against the
                       generator's guarantees. */
#pragma omp simd aligned(damp, usol, vp : 64)
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      float r0 = save_src[((time / sf) % (time_M - time_m + 1))][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      usol[t0][x - time + 4][y - time + 4][zind + 4] += r0;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  /* Accumulate elapsed wall-clock seconds for the loop nest above. */
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  return 0;
}
simple_pk.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"

#ifdef USE_OPENMP
#include <omp.h>
#else
#define omp_get_num_threads() 1
#endif

/*
 * write a grib-2 file
 *
 * sec0..sec4 predefined sections 0 to 4
 * data[] = values to encode into grib
 * ndata = size of data
 * out = output file
 *
 */

/* Write one GRIB2 message: build sections 5-7 (simple packing) from data[]
 * and append them, together with the caller-supplied sections 0-4, to out.
 * Returns the result of wrt_sec().  Sections 5-7 are allocated here and
 * freed before returning; sec[] entries are not modified. */
int simple_grib_out(unsigned char **sec, float *data, unsigned int ndata, int use_scale, int dec_scale,
        int bin_scale, int wanted_bits, int max_bits, struct seq_file *out) {

    unsigned int n_defined;
    int i;
    unsigned char *sec0, *sec1, *sec2 , *sec3, *sec4, *sec5, *sec6, *sec7;

    /* required passed sections */
    sec0 = sec[0];
    sec1 = sec[1];
    sec2 = sec[2];
    sec3 = sec[3];
    sec4 = sec[4];

    /* make a sections 5-7 */
    n_defined = ndata;
    sec6 = mk_bms(data, &n_defined);                    // make bitmap section (may shrink n_defined)
    mk_sec5and7(data, n_defined, &sec5, &sec7,use_scale,dec_scale,bin_scale, wanted_bits, max_bits);      // make sec 5 and 7

    i = wrt_sec(sec0, sec1, sec2, sec3, sec4, sec5, sec6, sec7, out);

    free(sec5);
    free(sec6);
    free(sec7);

    return i;
}

/*
 * make sec 5 and 7 using simple packing
 */

/* Build GRIB2 section 5 (data representation, template 5.0) and section 7
 * (packed data) for n defined values using simple packing.
 *
 * data: the defined values; NOTE: scaled in place (data becomes
 *       (data - ref) * 2^-binary_scale), so the caller must not reuse it.
 * use_scale == 0: ECMWF-style — derive binary_scale from wanted_bits;
 * use_scale != 0: use the passed dec_scale/bin_scale, widening binary_scale
 *                 if the field would need more than max_bits.
 * *sec5 / *sec7 receive malloc'd buffers owned by the caller.
 * Returns 0; calls fatal_error() on allocation failure. */
int mk_sec5and7(float *data, unsigned int n, unsigned char **sec5, unsigned char **sec7,
	int use_scale, int dec_scale, int bin_scale, int wanted_bits, int max_bits) {

    float min_val, max_val, ncep_min_val;
    int nbits, binary_scale, j;
    double ref, frange, scale, dec_factor;
    size_t sec5_size, sec7_size;
    unsigned char *p;
    unsigned int i, k, di;
    int nthreads;

    binary_scale = bin_scale;
    if (n == 0) {		// all undefined
	nbits = 0;
	ref = ncep_min_val = 0.0;
    }
    else {
        min_max_array_all_defined(data, n, &min_val, &max_val);
	ncep_min_val = min_val;     /* kept un-decimal-scaled for the constant-field fixup below */
	if (use_scale == 0) {
	    /* ecmwf style: choose binary_scale so the range fits wanted_bits */
	    ref = min_val;
	    frange = max_val - ref;
	    dec_scale = 0;
	    if (frange != 0.0) {
		frexp(frange, &j);
		binary_scale = j - wanted_bits;
		nbits = wanted_bits;
		scale = ldexp(1.0, -binary_scale);
		frange = floor((max_val-ref)*scale + 0.5);
		frexp(frange, &j);
		/* rounding may have pushed the range into one more bit */
		if (j != nbits) binary_scale++;
	    }
	    else {
		binary_scale = nbits = 0;
		scale = 1;
	    }
	}
	else {
	    if (dec_scale) {
		/* apply decimal scaling to min/max and, in parallel, to data[] */
		dec_factor = Int_Power(10.0, -dec_scale);
		min_val *= dec_factor;
		max_val *= dec_factor;
#pragma omp parallel for
		for (i = 0; i < n; i++) {
		    data[i] *= dec_factor;
		}
	    }
	    scale = ldexp(1.0, -binary_scale);
	    ref = min_val;
	    frange = floor ((max_val - ref)*scale + 0.5);
	    frexp(frange, &nbits);
	    if (nbits > max_bits) {
		/* too wide: trade precision (coarser binary scale) for width */
		binary_scale += (nbits - max_bits);
		nbits = max_bits;
	    }
	}

	/* scale data by ref, binary_scale and dec_scale */
	if (binary_scale) {
	    scale = ldexp(1.0, -binary_scale);
#pragma omp parallel for
	    for (i = 0; i < n; i++) {
		data[i] = (data[i] - ref)*scale;
	    }
	}
	else {
#pragma omp parallel for
	    for (i = 0; i < n; i++) {
		data[i] = data[i] - ref;
	    }
	}
    }

    sec5_size = 21;
    /* 5-byte section header + nbits bits per value, rounded up to whole bytes */
    sec7_size = 5 + (nbits * (n / 8)) + (nbits * (n % 8) + 7) / 8;

    // section 7
    *sec7 = p = (unsigned char *) malloc(sec7_size);
    if (p == NULL) fatal_error("mk_sec5and7: memory allocation","");
    uint_char(sec7_size, p);
    p[4] = 7;

    if (n != 0) {
	// single thread version
        // flist2bitstream(data,p + 5,n,nbits);

	// flist2bitstream can run in parallel if the loop has
	// increments of 8.  Then each conversion to a bitstream
	// starts on a byte boundary.
#pragma omp parallel private(i,k)
	{
#pragma omp single
	    {
		/* chunk size per thread, rounded up to a multiple of 8 values
		   so every chunk starts on a byte boundary; implicit barrier
		   after `single` publishes di to the worksharing loop */
		nthreads = omp_get_num_threads();
		di = (n + nthreads - 1) / nthreads;
		di = ((di + 7) | 7) ^ 7;
	    }
#pragma omp for
	    for (i = 0; i < n; i+= di) {
		k = n - i;
		if (k > di) k = di;
		flist2bitstream(data + i, p + 5 + (i/8)*nbits, k, nbits);
	    }
	}
    }

    // section 5

    // fix for buggy NCEP decoders
    // for constant fields, they ignore the decimal scaling
    if (nbits == 0) {
	dec_scale = binary_scale = 0;
	ref = ncep_min_val;
    }

    *sec5 = p = (unsigned char *) malloc(sec5_size);
    if (p == NULL) fatal_error("mk_sec5and7: memory allocation","");
    uint_char(sec5_size, p);		// length of section 5
    p[4] = 5;				// section 5
    uint_char(n, p+5);			// number of defined points
    uint2_char(0,p+9);			// template 5.0
    flt2ieee(ref,p+11);			// ieee reference value
    int2_char(binary_scale,p+15);
    int2_char(-dec_scale,p+17);
    p[19] = nbits;
    p[20] = 0;				// template 5.1 - set to floating

    return 0;
}
GB_unaryop__abs_fp64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp64_uint8
// op(A') function:  GB_tran__abs_fp64_uint8

// C type:   double
// A type:   uint8_t
// cast:     double cij = (double) aij
// unaryop:  cij = fabs (aij)

// input element type
#define GB_ATYPE \
    uint8_t

// output element type
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply cij = fabs((double) aij) elementwise over anz entries, parallelized
// statically over nthreads.  Returns GrB_NO_VALUE when the operator/type
// combination is compiled out via GB_DISABLE, else GrB_SUCCESS.
GrB_Info GB_unop__abs_fp64_uint8
(
    double *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose + typecast + apply; the actual loop lives in the included
// GB_unaryop_transpose.c template, which consumes the GB_* macros above.
GrB_Info GB_tran__abs_fp64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_kernel_pack4to1_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * 
ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4a-inch/4a-64-outch; #if __aarch64__ kernel_tm_pack4.create(8 * inch / 4, 64, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)4u * 4, 4); #else kernel_tm_pack4.create(4 * inch / 4, 64, outch / 4 + outch % 4, (size_t)4u * 4, 4); #endif int p = 0; #if __aarch64__ for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack4.channel(p / 8); for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); const float* k10 = k1.row(q); const float* k11 = k1.row(q + 1); const float* k12 = k1.row(q + 2); const float* k13 = k1.row(q + 3); const float* k20 = k2.row(q); const float* k21 = k2.row(q + 1); const float* k22 = k2.row(q + 2); const float* k23 = k2.row(q + 3); const float* k30 = k3.row(q); const float* k31 = k3.row(q + 1); const float* k32 = k3.row(q + 2); const float* k33 = k3.row(q + 3); const float* k40 = k4.row(q); const float* k41 = k4.row(q + 1); const float* k42 = k4.row(q + 2); const float* k43 = k4.row(q + 3); const float* k50 = k5.row(q); const float* k51 = k5.row(q + 1); const float* k52 = k5.row(q + 2); const float* k53 = k5.row(q + 3); const float* k60 = k6.row(q); const float* k61 = k6.row(q + 1); const float* k62 = k6.row(q + 2); const float* k63 = k6.row(q + 3); const float* k70 = k7.row(q); const float* k71 = k7.row(q + 1); const float* k72 = k7.row(q + 2); const float* k73 = k7.row(q + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; 
g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4); #else Mat g0 = kernel_tm_pack4.channel(p / 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); const float* k10 = k1.row(q); const float* k11 = k1.row(q + 1); const float* k12 = k1.row(q + 2); const float* k13 = k1.row(q + 3); const float* k20 = k2.row(q); const float* k21 = k2.row(q + 1); const float* k22 = k2.row(q + 2); const float* k23 = k2.row(q + 3); const float* k30 = k3.row(q); const float* k31 = k3.row(q + 1); const float* k32 = k3.row(q + 2); const float* k33 = k3.row(q + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; g00 += 16; } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4 + p % 4); #else Mat g0 = kernel_tm_pack4.channel(p / 4 + p % 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for 
(int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); g00[0] = k00[k]; g00[1] = k01[k]; g00[2] = k02[k]; g00[3] = k03[k]; g00 += 4; } } } } static void conv3x3s1_winograd63_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd63_transform_input_pack4_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, 
tiles, 64, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n" "sub %0, %0, #128 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v19.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "sub 
%0, %0, #64 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d16-d19}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d20-d23}, [%0 :128] \n" "sub %0, %0, #96 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d16-d17}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" "vst1.f32 {d22-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128] \n" "sub %0, %0, #32 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); float* output4_tm = top_blob_tm.channel(p + 4); float* output5_tm = top_blob_tm.channel(p + 5); float* output6_tm = top_blob_tm.channel(p + 6); float* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, 
v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v23.4s, v0.4s, v5.s[1] \n" "fmla v26.4s, v0.4s, v5.s[2] \n" "fmla v29.4s, v0.4s, v5.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v21.4s, v1.4s, v5.s[0] \n" "fmla v24.4s, v1.4s, v5.s[1] \n" "fmla v27.4s, v1.4s, v5.s[2] \n" "fmla v30.4s, v1.4s, v5.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "fmla v22.4s, v2.4s, v5.s[0] \n" "fmla v25.4s, v2.4s, v5.s[1] \n" "fmla v28.4s, v2.4s, v5.s[2] \n" "fmla v31.4s, v2.4s, v5.s[3] \n" "fmla v8.4s, v3.4s, v6.s[0] \n" "fmla v11.4s, v3.4s, v6.s[1] \n" "fmla v14.4s, v3.4s, v6.s[2] \n" "fmla v17.4s, v3.4s, v6.s[3] \n" "fmla v20.4s, v3.4s, v7.s[0] \n" "fmla v23.4s, v3.4s, v7.s[1] \n" "fmla v26.4s, v3.4s, v7.s[2] \n" "fmla v29.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], 
#64 \n" "fmla v9.4s, v0.4s, v6.s[0] \n" "fmla v12.4s, v0.4s, v6.s[1] \n" "fmla v15.4s, v0.4s, v6.s[2] \n" "fmla v18.4s, v0.4s, v6.s[3] \n" "fmla v21.4s, v0.4s, v7.s[0] \n" "fmla v24.4s, v0.4s, v7.s[1] \n" "fmla v27.4s, v0.4s, v7.s[2] \n" "fmla v30.4s, v0.4s, v7.s[3] \n" "fmla v10.4s, v1.4s, v6.s[0] \n" "fmla v13.4s, v1.4s, v6.s[1] \n" "fmla v16.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v22.4s, v1.4s, v7.s[0] \n" "fmla v25.4s, v1.4s, v7.s[1] \n" "fmla v28.4s, v1.4s, v7.s[2] \n" "fmla v31.4s, v1.4s, v7.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "fmla v8.4s, v2.4s, v4.s[0] \n" "fmla v11.4s, v2.4s, v4.s[1] \n" "fmla v14.4s, v2.4s, v4.s[2] \n" "fmla v17.4s, v2.4s, v4.s[3] \n" "fmla v20.4s, v2.4s, v5.s[0] \n" "fmla v23.4s, v2.4s, v5.s[1] \n" "fmla v26.4s, v2.4s, v5.s[2] \n" "fmla v29.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v4.s[0] \n" "fmla v12.4s, v3.4s, v4.s[1] \n" "fmla v15.4s, v3.4s, v4.s[2] \n" "fmla v18.4s, v3.4s, v4.s[3] \n" "fmla v21.4s, v3.4s, v5.s[0] \n" "fmla v24.4s, v3.4s, v5.s[1] \n" "fmla v27.4s, v3.4s, v5.s[2] \n" "fmla v30.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v10.4s, v0.4s, v4.s[0] \n" "fmla v13.4s, v0.4s, v4.s[1] \n" "fmla v16.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v22.4s, v0.4s, v5.s[0] \n" "fmla v25.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v31.4s, v0.4s, v5.s[3] \n" "fmla v8.4s, v1.4s, v6.s[0] \n" "fmla v11.4s, v1.4s, v6.s[1] \n" "fmla v14.4s, v1.4s, v6.s[2] \n" "fmla v17.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v23.4s, v1.4s, v7.s[1] \n" "fmla v26.4s, v1.4s, v7.s[2] \n" "fmla v29.4s, v1.4s, v7.s[3] \n" "fmla v9.4s, v2.4s, v6.s[0] \n" "fmla v12.4s, v2.4s, v6.s[1] \n" "fmla v15.4s, v2.4s, v6.s[2] \n" "fmla v18.4s, v2.4s, v6.s[3] \n" "fmla v21.4s, v2.4s, v7.s[0] \n" "fmla v24.4s, v2.4s, v7.s[1] \n" "fmla v27.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, 
v2.4s, v7.s[3] \n" "fmla v10.4s, v3.4s, v6.s[0] \n" "fmla v13.4s, v3.4s, v6.s[1] \n" "fmla v16.4s, v3.4s, v6.s[2] \n" "fmla v19.4s, v3.4s, v6.s[3] \n" "fmla v22.4s, v3.4s, v7.s[0] \n" "fmla v25.4s, v3.4s, v7.s[1] \n" "fmla v28.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" "st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n" "st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n" "st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n" "st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor 
v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v18.4s, v0.4s, v4.s[1] \n" "fmla v20.4s, v0.4s, v4.s[2] \n" "fmla v22.4s, v0.4s, v4.s[3] \n" "fmla v24.4s, v0.4s, v5.s[0] \n" "fmla v26.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v30.4s, v0.4s, v5.s[3] \n" "fmla v17.4s, v1.4s, v4.s[0] \n" "fmla v19.4s, v1.4s, v4.s[1] \n" "fmla v21.4s, v1.4s, v4.s[2] \n" "fmla v23.4s, v1.4s, v4.s[3] \n" "fmla v25.4s, v1.4s, v5.s[0] \n" "fmla v27.4s, v1.4s, v5.s[1] \n" "fmla v29.4s, v1.4s, v5.s[2] \n" "fmla v31.4s, v1.4s, v5.s[3] \n" "fmla v16.4s, v2.4s, v6.s[0] \n" "fmla v18.4s, v2.4s, v6.s[1] \n" "fmla v20.4s, v2.4s, v6.s[2] \n" "fmla v22.4s, v2.4s, v6.s[3] \n" "fmla v24.4s, v2.4s, v7.s[0] \n" "fmla v26.4s, v2.4s, v7.s[1] \n" "fmla v28.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v17.4s, v3.4s, v6.s[0] \n" "fmla v19.4s, v3.4s, v6.s[1] \n" "fmla v21.4s, v3.4s, v6.s[2] \n" "fmla v23.4s, v3.4s, v6.s[3] \n" "fmla v25.4s, v3.4s, v7.s[0] \n" "fmla v27.4s, v3.4s, v7.s[1] \n" "fmla v29.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v12.4s, v8.s[0] \n" "fmla v18.4s, v12.4s, v8.s[1] \n" "fmla v20.4s, v12.4s, v8.s[2] \n" "fmla v22.4s, v12.4s, v8.s[3] \n" "fmla v24.4s, v12.4s, v9.s[0] \n" "fmla v26.4s, v12.4s, v9.s[1] \n" "fmla v28.4s, v12.4s, v9.s[2] \n" "fmla v30.4s, v12.4s, v9.s[3] \n" "fmla v17.4s, v13.4s, v8.s[0] \n" "fmla v19.4s, v13.4s, v8.s[1] \n" "fmla v21.4s, v13.4s, v8.s[2] \n" "fmla v23.4s, v13.4s, v8.s[3] \n" "fmla v25.4s, v13.4s, v9.s[0] \n" "fmla v27.4s, v13.4s, v9.s[1] \n" "fmla v29.4s, v13.4s, v9.s[2] \n" "fmla v31.4s, v13.4s, v9.s[3] \n" 
"fmla v16.4s, v14.4s, v10.s[0] \n" "fmla v18.4s, v14.4s, v10.s[1] \n" "fmla v20.4s, v14.4s, v10.s[2] \n" "fmla v22.4s, v14.4s, v10.s[3] \n" "fmla v24.4s, v14.4s, v11.s[0] \n" "fmla v26.4s, v14.4s, v11.s[1] \n" "fmla v28.4s, v14.4s, v11.s[2] \n" "fmla v30.4s, v14.4s, v11.s[3] \n" "fmla v17.4s, v15.4s, v10.s[0] \n" "fmla v19.4s, v15.4s, v10.s[1] \n" "fmla v21.4s, v15.4s, v10.s[2] \n" "fmla v23.4s, v15.4s, v10.s[3] \n" "fmla v25.4s, v15.4s, v11.s[0] \n" "fmla v27.4s, v15.4s, v11.s[1] \n" "fmla v29.4s, v15.4s, v11.s[2] \n" "fmla v31.4s, v15.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" "st1 {v20.4s, v21.4s}, [%3], #32 \n" "st1 {v22.4s, v23.4s}, [%4], #32 \n" "st1 {v24.4s, v25.4s}, [%5], #32 \n" "st1 {v26.4s, v27.4s}, [%6], #32 \n" "st1 {v28.4s, v29.4s}, [%7], #32 \n" "st1 {v30.4s, v31.4s}, [%8], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm 
pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v17.4s, v0.4s, v4.s[1] \n" "fmla v18.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v21.4s, v0.4s, v5.s[1] \n" "fmla v22.4s, v0.4s, v5.s[2] \n" "fmla v23.4s, v0.4s, v5.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v1.4s, v6.s[0] \n" "fmla v17.4s, v1.4s, v6.s[1] \n" "fmla v18.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v21.4s, v1.4s, v7.s[1] \n" "fmla v22.4s, v1.4s, v7.s[2] \n" "fmla v23.4s, v1.4s, v7.s[3] \n" "fmla v16.4s, v2.4s, v8.s[0] \n" "fmla v17.4s, v2.4s, v8.s[1] \n" "fmla v18.4s, v2.4s, v8.s[2] \n" "fmla v19.4s, v2.4s, v8.s[3] \n" "fmla v20.4s, v2.4s, v9.s[0] \n" "fmla v21.4s, v2.4s, v9.s[1] \n" "fmla v22.4s, v2.4s, v9.s[2] \n" "fmla v23.4s, v2.4s, v9.s[3] \n" "fmla v16.4s, v3.4s, v10.s[0] \n" "fmla v17.4s, v3.4s, v10.s[1] \n" "fmla v18.4s, v3.4s, v10.s[2] \n" "fmla v19.4s, v3.4s, v10.s[3] \n" "fmla v20.4s, v3.4s, v11.s[0] \n" "fmla v21.4s, v3.4s, v11.s[1] \n" "fmla v22.4s, v3.4s, v11.s[2] \n" "fmla v23.4s, v3.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" "st1 {v18.4s}, [%3], #16 \n" "st1 {v19.4s}, [%4], #16 \n" "st1 {v20.4s}, [%5], #16 \n" "st1 {v21.4s}, [%6], #16 \n" "st1 {v22.4s}, [%7], #16 \n" "st1 {v23.4s}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", 
"memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4s}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v5.4s, v0.s[0] \n" "fmla v18.4s, v6.4s, v0.s[1] \n" "fmla v19.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v8.4s, v0.s[2] \n" "fmla v17.4s, v9.4s, v0.s[2] \n" "fmla v18.4s, v10.4s, v0.s[3] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.s}[0], [%1], #4 \n" "st1 {v16.s}[1], [%2], #4 \n" "st1 {v16.s}[2], [%3], #4 \n" "st1 {v16.s}[3], [%4], #4 \n" "st1 {v17.s}[0], [%5], #4 \n" "st1 {v17.s}[1], [%6], #4 \n" "st1 {v17.s}[2], [%7], #4 \n" "st1 {v17.s}[3], [%8], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); } } } remain_outch_start += nn_outch << 3; nn_outch = (outch - remain_outch_start) >> 2; #else // __aarch64__ nn_outch = outch >> 2; #endif // __aarch64__ #pragma 
omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); #if __aarch64__ const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4); #else const Mat kernel01_tm = kernel_tm.channel(p / 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n" "fmla v8.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v14.4s, v3.4s, v5.s[2] \n" "fmla v17.4s, v3.4s, v5.s[3] \n" "fmla v9.4s, v20.4s, v5.s[0] \n" "fmla v12.4s, v20.4s, v5.s[1] \n" 
"fmla v15.4s, v20.4s, v5.s[2] \n" "fmla v18.4s, v20.4s, v5.s[3] \n" "fmla v10.4s, v21.4s, v5.s[0] \n" "fmla v13.4s, v21.4s, v5.s[1] \n" "fmla v16.4s, v21.4s, v5.s[2] \n" "fmla v19.4s, v21.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n" "fmla v8.4s, v22.4s, v6.s[0] \n" "fmla v11.4s, v22.4s, v6.s[1] \n" "fmla v14.4s, v22.4s, v6.s[2] \n" "fmla v17.4s, v22.4s, v6.s[3] \n" "fmla v9.4s, v23.4s, v6.s[0] \n" "fmla v12.4s, v23.4s, v6.s[1] \n" "fmla v15.4s, v23.4s, v6.s[2] \n" "fmla v18.4s, v23.4s, v6.s[3] \n" "fmla v10.4s, v24.4s, v6.s[0] \n" "fmla v13.4s, v24.4s, v6.s[1] \n" "fmla v16.4s, v24.4s, v6.s[2] \n" "fmla v19.4s, v24.4s, v6.s[3] \n" "fmla v8.4s, v25.4s, v7.s[0] \n" "fmla v11.4s, v25.4s, v7.s[1] \n" "fmla v14.4s, v25.4s, v7.s[2] \n" "fmla v17.4s, v25.4s, v7.s[3] \n" "fmla v9.4s, v26.4s, v7.s[0] \n" "fmla v12.4s, v26.4s, v7.s[1] \n" "fmla v15.4s, v26.4s, v7.s[2] \n" "fmla v18.4s, v26.4s, v7.s[3] \n" "fmla v10.4s, v27.4s, v7.s[0] \n" "fmla v13.4s, v27.4s, v7.s[1] \n" "fmla v16.4s, v27.4s, v7.s[2] \n" "fmla v19.4s, v27.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ 
asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v10.4s, v0.4s, v4.s[1] \n" "fmla v12.4s, v0.4s, v4.s[2] \n" "fmla v14.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v11.4s, v1.4s, v4.s[1] \n" "fmla v13.4s, v1.4s, v4.s[2] \n" "fmla v15.4s, v1.4s, v4.s[3] \n" "fmla v8.4s, v2.4s, v5.s[0] \n" "fmla v10.4s, v2.4s, v5.s[1] \n" "fmla v12.4s, v2.4s, v5.s[2] \n" "fmla v14.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v13.4s, v3.4s, v5.s[2] \n" "fmla v15.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n" "fmla v8.4s, v16.4s, v6.s[0] \n" "fmla v10.4s, v16.4s, v6.s[1] \n" "fmla v12.4s, v16.4s, v6.s[2] \n" "fmla v14.4s, v16.4s, v6.s[3] \n" "fmla v9.4s, v17.4s, v6.s[0] \n" "fmla v11.4s, v17.4s, v6.s[1] \n" "fmla v13.4s, v17.4s, v6.s[2] \n" "fmla v15.4s, v17.4s, v6.s[3] \n" "fmla v8.4s, v18.4s, v7.s[0] \n" "fmla v10.4s, v18.4s, v7.s[1] \n" "fmla v12.4s, v18.4s, v7.s[2] \n" "fmla v14.4s, v18.4s, v7.s[3] \n" "fmla v9.4s, v19.4s, v7.s[0] \n" "fmla v11.4s, v19.4s, v7.s[1] \n" "fmla v13.4s, v19.4s, v7.s[2] \n" "fmla v15.4s, v19.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), 
"4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q10, q0, d8[1] \n" "vmla.f32 q12, q0, d9[0] \n" "vmla.f32 q14, q0, d9[1] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q11, q1, d8[1] \n" "vmla.f32 q13, q1, d9[0] \n" "vmla.f32 q15, q1, d9[1] \n" "vmla.f32 q8, q2, d10[0] \n" "vmla.f32 q10, q2, d10[1] \n" "vmla.f32 q12, q2, d11[0] \n" "vmla.f32 q14, q2, d11[1] \n" "vmla.f32 q9, q3, d10[0] \n" "vmla.f32 q11, q3, d10[1] \n" "vmla.f32 q13, q3, d11[0] \n" "vmla.f32 q15, q3, d11[1] \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "vmla.f32 q8, q0, d12[0] \n" "vmla.f32 q10, q0, d12[1] \n" "vmla.f32 q12, q0, d13[0] \n" "vmla.f32 q14, q0, d13[1] \n" "vmla.f32 q9, q1, d12[0] \n" "vmla.f32 q11, q1, d12[1] \n" "vmla.f32 q13, q1, d13[0] \n" "vmla.f32 q15, q1, d13[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d14[0] \n" "vmla.f32 q10, q2, d14[1] \n" "vmla.f32 q12, q2, d15[0] \n" "vmla.f32 q14, q2, d15[1] \n" "vmla.f32 q9, q3, d14[0] \n" "vmla.f32 q11, q3, d14[1] \n" "vmla.f32 q13, q3, d15[0] \n" "vmla.f32 q15, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1]! \n" "vst1.f32 {d20-d23}, [%2]! \n" "vst1.f32 {d24-d27}, [%3]! \n" "vst1.f32 {d28-d31}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v0.4s, v4.s[1] \n" "fmla v10.4s, v0.4s, v4.s[2] \n" "fmla v11.4s, v0.4s, v4.s[3] \n" "fmla v8.4s, v1.4s, v5.s[0] \n" "fmla v9.4s, v1.4s, v5.s[1] \n" "fmla v10.4s, v1.4s, v5.s[2] \n" "fmla v11.4s, v1.4s, v5.s[3] \n" "fmla v8.4s, v2.4s, v6.s[0] \n" "fmla v9.4s, v2.4s, v6.s[1] \n" "fmla v10.4s, v2.4s, v6.s[2] \n" "fmla v11.4s, v2.4s, v6.s[3] \n" "fmla v8.4s, v3.4s, v7.s[0] \n" "fmla v9.4s, v3.4s, v7.s[1] \n" "fmla v10.4s, v3.4s, v7.s[2] \n" "fmla v11.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // 
__aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q0, d8[1] \n" "vmla.f32 q10, q0, d9[0] \n" "vmla.f32 q11, q0, d9[1] \n" "vmla.f32 q8, q1, d10[0] \n" "vmla.f32 q9, q1, d10[1] \n" "vmla.f32 q10, q1, d11[0] \n" "vmla.f32 q11, q1, d11[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d12[0] \n" "vmla.f32 q9, q2, d12[1] \n" "vmla.f32 q10, q2, d13[0] \n" "vmla.f32 q11, q2, d13[1] \n" "vmla.f32 q8, q3, d14[0] \n" "vmla.f32 q9, q3, d14[1] \n" "vmla.f32 q10, q3, d15[0] \n" "vmla.f32 q11, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1]! \n" "vst1.f32 {d18-d19}, [%2]! \n" "vst1.f32 {d20-d21}, [%3]! \n" "vst1.f32 {d22-d23}, [%4]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, 
v8.4s, v10.4s \n" "st1 {v8.s}[0], [%1], #4 \n" "st1 {v8.s}[1], [%2], #4 \n" "st1 {v8.s}[2], [%3], #4 \n" "st1 {v8.s}[3], [%4], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16[0]}, [%1]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vst1.f32 {d17[0]}, [%3]! \n" "vst1.f32 {d17[1]}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v5.4s, v3.4s, v4.s[1] \n" "fmla v6.4s, v12.4s, v4.s[1] \n" "fmla v7.4s, v13.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" "fmla v8.4s, v14.4s, v4.s[2] \n" "fmla v9.4s, v15.4s, v4.s[2] \n" "fmla v10.4s, v16.4s, v4.s[2] \n" "fmla v5.4s, v17.4s, v4.s[3] \n" "fmla v6.4s, v18.4s, v4.s[3] \n" "fmla v7.4s, v19.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v5.4s \n" "fadd v9.4s, v9.4s, v6.4s \n" "fadd v10.4s, v10.4s, v7.4s \n" "st1 
{v8.4s, v9.4s, v10.4s}, [%1], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[1] \n" "fmla v11.4s, v3.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v8.4s, v12.4s, v4.s[2] \n" "fmla v9.4s, v13.4s, v4.s[2] \n" "fmla v10.4s, v14.4s, v4.s[3] \n" "fmla v11.4s, v15.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3]! 
\n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q10, q2, d8[1] \n" "vmla.f32 q11, q3, d8[1] \n" "pld [%2, #512] \n" "vldm %2!, {d24-d31} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q12, d9[0] \n" "vmla.f32 q9, q13, d9[0] \n" "vmla.f32 q10, q14, d9[1] \n" "vmla.f32 q11, q15, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vst1.f32 {d16-d19}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3]! 
\n" "subs %0, %0, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16-d17}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel0_tm.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); for (int q = 0; q < inch; q++) { float32x4_t _r0 = vld1q_f32(r0); float32x4_t _k0 = vld1q_f32(kptr); _sum0 = vmlaq_f32(_sum0, _r0, _k0); kptr += 4; r0 += 4; } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss2 = vpadd_f32(_ss, _ss); float sum0 = vget_lane_f32(_ss2, 0); #endif output0_tm[0] = sum0; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { conv3x3s1_winograd63_transform_output_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch 
= 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00_0 = vld1q_f32(k0); float32x4_t _k01_0 = vld1q_f32(k0 + 4); float32x4_t _k02_0 = vld1q_f32(k0 + 8); float32x4_t _k10_0 = vld1q_f32(k0 + 12); float32x4_t _k11_0 = vld1q_f32(k0 + 16); float32x4_t _k12_0 = vld1q_f32(k0 + 20); float32x4_t _k20_0 = vld1q_f32(k0 + 24); float32x4_t _k21_0 = vld1q_f32(k0 + 28); float32x4_t _k22_0 = vld1q_f32(k0 + 32); float32x4_t _k00_1 = vld1q_f32(k1); float32x4_t _k01_1 = vld1q_f32(k1 + 4); float32x4_t _k02_1 = vld1q_f32(k1 + 8); float32x4_t _k10_1 = vld1q_f32(k1 + 12); float32x4_t _k11_1 = vld1q_f32(k1 + 16); float32x4_t _k12_1 = vld1q_f32(k1 + 20); float32x4_t _k20_1 = vld1q_f32(k1 + 24); float32x4_t _k21_1 = vld1q_f32(k1 + 28); float32x4_t _k22_1 = vld1q_f32(k1 + 32); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r00 r01 r02 r03 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %10.4s, v1.4s \n" "fmul v19.4s, %19.4s, v1.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4s, v5.4s}, [%2] \n" // r04 r05 "fmul v6.4s, %10.4s, v2.4s \n" "fmul v7.4s, %19.4s, v2.4s \n" "fmul v8.4s, %10.4s, v3.4s \n" "fmul v9.4s, %19.4s, v3.4s \n" "fmla v16.4s, %11.4s, v1.4s \n" "fmla v17.4s, %20.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla 
v19.4s, %20.4s, v2.4s \n" "fmla v6.4s, %11.4s, v3.4s \n" "fmla v7.4s, %20.4s, v3.4s \n" "fmla v8.4s, %11.4s, v4.4s \n" "fmla v9.4s, %20.4s, v4.4s \n" "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %21.4s, v3.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r10 r11 r12 r12 "fmla v6.4s, %12.4s, v4.4s \n" "fmla v7.4s, %21.4s, v4.4s \n" "fmla v8.4s, %12.4s, v5.4s \n" "fmla v9.4s, %21.4s, v5.4s \n" "fmla v16.4s, %13.4s, v0.4s \n" "fmla v17.4s, %22.4s, v0.4s \n" "fmla v18.4s, %13.4s, v1.4s \n" "fmla v19.4s, %22.4s, v1.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4s, v5.4s}, [%3] \n" // r14 r15 "fmla v6.4s, %13.4s, v2.4s \n" "fmla v7.4s, %22.4s, v2.4s \n" "fmla v8.4s, %13.4s, v3.4s \n" "fmla v9.4s, %22.4s, v3.4s \n" "fmla v16.4s, %14.4s, v1.4s \n" "fmla v17.4s, %23.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %23.4s, v2.4s \n" "fmla v6.4s, %14.4s, v3.4s \n" "fmla v7.4s, %23.4s, v3.4s \n" "fmla v8.4s, %14.4s, v4.4s \n" "fmla v9.4s, %23.4s, v4.4s \n" "fmla v16.4s, %15.4s, v2.4s \n" "fmla v17.4s, %24.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %24.4s, v3.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n" // r20 r21 r22 r22 "fmla v6.4s, %15.4s, v4.4s \n" "fmla v7.4s, %24.4s, v4.4s \n" "fmla v8.4s, %15.4s, v5.4s \n" "fmla v9.4s, %24.4s, v5.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %16.4s, v1.4s \n" "fmla v19.4s, %25.4s, v1.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4] \n" // r24 r25 "fmla v6.4s, %16.4s, v2.4s \n" "fmla v7.4s, %25.4s, v2.4s \n" "fmla v8.4s, %16.4s, v3.4s \n" "fmla v9.4s, %25.4s, v3.4s \n" "fmla v16.4s, %17.4s, v1.4s \n" "fmla v17.4s, %26.4s, v1.4s \n" "fmla v18.4s, %17.4s, v2.4s \n" "fmla v19.4s, %26.4s, v2.4s \n" "fmla v6.4s, %17.4s, v3.4s \n" "fmla v7.4s, %26.4s, v3.4s \n" "fmla v8.4s, %17.4s, v4.4s \n" "fmla v9.4s, %26.4s, 
v4.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "fmla v18.4s, %18.4s, v3.4s \n" "fmla v19.4s, %27.4s, v3.4s \n" "fmla v6.4s, %18.4s, v4.4s \n" "fmla v7.4s, %27.4s, v4.4s \n" "fmla v8.4s, %18.4s, v5.4s \n" "fmla v9.4s, %27.4s, v5.4s \n" "ld1 {v0.4s}, [%0] \n" // sum00 sum01 sum02 sum03 "ld1 {v1.4s}, [%1] \n" // sum10 sum11 sum12 sum13 "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "faddp v18.4s, v18.4s, v18.4s \n" "faddp v19.4s, v19.4s, v19.4s \n" "faddp v6.4s, v6.4s, v6.4s \n" "faddp v7.4s, v7.4s, v7.4s \n" "faddp v8.4s, v8.4s, v8.4s \n" "faddp v9.4s, v9.4s, v9.4s \n" "faddp v16.2s, v16.2s, v18.2s \n" "faddp v17.2s, v17.2s, v19.2s \n" "faddp v6.2s, v6.2s, v8.2s \n" "faddp v7.2s, v7.2s, v9.2s \n" "trn1 v16.2d, v16.2d, v6.2d \n" "trn1 v17.2d, v17.2d, v7.2d \n" "fadd v0.4s, v0.4s, v16.4s \n" "fadd v1.4s, v1.4s, v17.4s \n" "st1 {v0.4s}, [%0], #16 \n" "st1 {v1.4s}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2] \n" // r00 r01 r02 r03 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %10.4s, v1.4s \n" "fmul v19.4s, %19.4s, v1.4s \n" "fmla v16.4s, %11.4s, v1.4s \n" "fmla v17.4s, %20.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %20.4s, v2.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, 
v7.4s}, [%3] \n" // r10 r11 r12 r12 "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %21.4s, v3.4s \n" "fmla v16.4s, %13.4s, v4.4s \n" "fmla v17.4s, %22.4s, v4.4s \n" "fmla v18.4s, %13.4s, v5.4s \n" "fmla v19.4s, %22.4s, v5.4s \n" "fmla v16.4s, %14.4s, v5.4s \n" "fmla v17.4s, %23.4s, v5.4s \n" "fmla v18.4s, %14.4s, v6.4s \n" "fmla v19.4s, %23.4s, v6.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4] \n" // r20 r21 r22 r22 "fmla v16.4s, %15.4s, v6.4s \n" "fmla v17.4s, %24.4s, v6.4s \n" "fmla v18.4s, %15.4s, v7.4s \n" "fmla v19.4s, %24.4s, v7.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %16.4s, v1.4s \n" "fmla v19.4s, %25.4s, v1.4s \n" "fmla v16.4s, %17.4s, v1.4s \n" "fmla v17.4s, %26.4s, v1.4s \n" "fmla v18.4s, %17.4s, v2.4s \n" "fmla v19.4s, %26.4s, v2.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "fmla v18.4s, %18.4s, v3.4s \n" "fmla v19.4s, %27.4s, v3.4s \n" "ld1 {v4.2s}, [%0] \n" // sum00 sum01 "ld1 {v5.2s}, [%1] \n" // sum10 sum11 "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "faddp v18.4s, v18.4s, v18.4s \n" "faddp v19.4s, v19.4s, v19.4s \n" "add %2, %2, #32 \n" "faddp v16.2s, v16.2s, v18.2s \n" "faddp v17.2s, v17.2s, v19.2s \n" "add %3, %3, #32 \n" "fadd v4.2s, v4.2s, v16.2s \n" "fadd v5.2s, v5.2s, v17.2s \n" "add %4, %4, #32 \n" "st1 {v4.2s}, [%0], #8 \n" "st1 {v5.2s}, [%1], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : 
"memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%2, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%2] \n" // r00 r01 r02 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %11.4s, v1.4s \n" "fmul v19.4s, %20.4s, v1.4s \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%3] \n" // r10 r11 r12 "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %13.4s, v3.4s \n" "fmla v19.4s, %22.4s, v3.4s \n" "fmla v16.4s, %14.4s, v4.4s \n" "fmla v17.4s, %23.4s, v4.4s \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%4] \n" // r20 r21 r22 "fmla v18.4s, %15.4s, v5.4s \n" "fmla v19.4s, %24.4s, v5.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %17.4s, v1.4s \n" "fmla v19.4s, %26.4s, v1.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "ld1 {v3.s}[0], [%0] \n" // sum00 "ld1 {v4.s}[0], [%1] \n" // sum10 "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "add %2, %2, #16 \n" "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "add %3, %3, #16 \n" "faddp v16.2s, v16.2s, v16.2s \n" "faddp v17.2s, v17.2s, v17.2s \n" "add %4, %4, #16 \n" "fadd v3.2s, v3.2s, v16.2s \n" "fadd v4.2s, v4.2s, v17.2s \n" "st1 {v3.s}[0], [%0], #4 \n" "st1 {v4.s}[0], [%1], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19"); } r0 += 
2 * 4; r1 += 2 * 4; r2 += 2 * 4; } k0 += 9 * 4; k1 += 9 * 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out0.fill(bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k10 = vld1q_f32(k0 + 12); float32x4_t _k11 = vld1q_f32(k0 + 16); float32x4_t _k12 = vld1q_f32(k0 + 20); float32x4_t _k20 = vld1q_f32(k0 + 24); float32x4_t _k21 = vld1q_f32(k0 + 28); float32x4_t _k22 = vld1q_f32(k0 + 32); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %8.4s, v2.4s \n" "fmul v19.4s, %8.4s, v3.4s \n" "fmul v20.4s, %8.4s, v4.4s \n" "fmul v21.4s, %8.4s, v5.4s \n" "fmul v22.4s, %8.4s, v6.4s \n" "fmul v23.4s, %8.4s, v7.4s \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" // r08 r09 "fmla v16.4s, %9.4s, v1.4s \n" "fmla v17.4s, %9.4s, v2.4s \n" "fmla v18.4s, %9.4s, v3.4s \n" "fmla v19.4s, %9.4s, v4.4s \n" "fmla v20.4s, %9.4s, v5.4s \n" "fmla v21.4s, %9.4s, v6.4s \n" "fmla v22.4s, %9.4s, v7.4s \n" "fmla v23.4s, %9.4s, v8.4s \n" "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %10.4s, v4.4s \n" "fmla v19.4s, %10.4s, v5.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, 
%10.4s, v6.4s \n" "fmla v21.4s, %10.4s, v7.4s \n" "fmla v22.4s, %10.4s, v8.4s \n" "fmla v23.4s, %10.4s, v9.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v16.4s, %11.4s, v0.4s \n" "fmla v17.4s, %11.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %11.4s, v3.4s \n" "fmla v20.4s, %11.4s, v4.4s \n" "fmla v21.4s, %11.4s, v5.4s \n" "fmla v22.4s, %11.4s, v6.4s \n" "fmla v23.4s, %11.4s, v7.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r18 r19 "fmla v16.4s, %12.4s, v1.4s \n" "fmla v17.4s, %12.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %12.4s, v4.4s \n" "fmla v20.4s, %12.4s, v5.4s \n" "fmla v21.4s, %12.4s, v6.4s \n" "fmla v22.4s, %12.4s, v7.4s \n" "fmla v23.4s, %12.4s, v8.4s \n" "fmla v16.4s, %13.4s, v2.4s \n" "fmla v17.4s, %13.4s, v3.4s \n" "fmla v18.4s, %13.4s, v4.4s \n" "fmla v19.4s, %13.4s, v5.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v20.4s, %13.4s, v6.4s \n" "fmla v21.4s, %13.4s, v7.4s \n" "fmla v22.4s, %13.4s, v8.4s \n" "fmla v23.4s, %13.4s, v9.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %14.4s, v3.4s \n" "fmla v20.4s, %14.4s, v4.4s \n" "fmla v21.4s, %14.4s, v5.4s \n" "fmla v22.4s, %14.4s, v6.4s \n" "fmla v23.4s, %14.4s, v7.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r28 r29 "fmla v16.4s, %15.4s, v1.4s \n" "fmla v17.4s, %15.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %15.4s, v4.4s \n" "fmla v20.4s, %15.4s, v5.4s \n" "fmla v21.4s, %15.4s, v6.4s \n" "fmla v22.4s, %15.4s, v7.4s \n" "fmla v23.4s, %15.4s, v8.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "fmla v18.4s, %16.4s, v4.4s \n" "fmla v19.4s, %16.4s, v5.4s \n" "fmla v20.4s, %16.4s, v6.4s \n" 
"fmla v21.4s, %16.4s, v7.4s \n" "fmla v22.4s, %16.4s, v8.4s \n" "fmla v23.4s, %16.4s, v9.4s \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" // sum0 sum1 sum2 sum3 sum4 sum5 sum6 sum7 "faddp v16.4s, v16.4s, v17.4s \n" "faddp v18.4s, v18.4s, v19.4s \n" "faddp v20.4s, v20.4s, v21.4s \n" "faddp v22.4s, v22.4s, v23.4s \n" "faddp v16.4s, v16.4s, v18.4s \n" "faddp v20.4s, v20.4s, v22.4s \n" "fadd v0.4s, v0.4s, v16.4s \n" "fadd v1.4s, v1.4s, v20.4s \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } #endif // __aarch64__ for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" // r04 r05 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %8.4s, v2.4s \n" "fmul v19.4s, %8.4s, v3.4s \n" "fmla v16.4s, %9.4s, v1.4s \n" "fmla v17.4s, %9.4s, v2.4s \n" "fmla v18.4s, %9.4s, v3.4s \n" "fmla v19.4s, %9.4s, v8.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %10.4s, v8.4s \n" "fmla v19.4s, %10.4s, v9.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r14 r15 "fmla v16.4s, %11.4s, v4.4s \n" "fmla v17.4s, %11.4s, v5.4s \n" "fmla v18.4s, %11.4s, v6.4s \n" "fmla v19.4s, %11.4s, v7.4s \n" "fmla v16.4s, %12.4s, v5.4s \n" "fmla v17.4s, %12.4s, v6.4s \n" "fmla v18.4s, %12.4s, v7.4s \n" "fmla v19.4s, %12.4s, v8.4s \n" "prfm pldl1keep, [%3, #512] \n" 
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v16.4s, %13.4s, v6.4s \n" "fmla v17.4s, %13.4s, v7.4s \n" "fmla v18.4s, %13.4s, v8.4s \n" "fmla v19.4s, %13.4s, v9.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r24 r25 "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %14.4s, v3.4s \n" "fmla v16.4s, %15.4s, v1.4s \n" "fmla v17.4s, %15.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %15.4s, v8.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "fmla v18.4s, %16.4s, v8.4s \n" "fmla v19.4s, %16.4s, v9.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" // sum0 sum1 sum2 sum3 "faddp v16.4s, v16.4s, v17.4s \n" "faddp v18.4s, v18.4s, v19.4s \n" "faddp v16.4s, v16.4s, v18.4s \n" "fadd v0.4s, v0.4s, v16.4s \n" "st1 {v0.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" // r00 r01 "vmul.f32 q3, %q8, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d4-d5}, [%1 :128]! \n" // r02 "vmul.f32 q4, %q8, q1 \n" "vmla.f32 q3, %q9, q1 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" // r03 r04 "vmul.f32 q5, %q8, q2 \n" "vmla.f32 q4, %q9, q2 \n" "vmla.f32 q3, %q10, q2 \n" "vmul.f32 q6, %q8, q0 \n" "vmla.f32 q5, %q9, q0 \n" "vmla.f32 q4, %q10, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d4-d5}, [%1 :128] \n" // r05 "vmla.f32 q6, %q9, q1 \n" "vmla.f32 q5, %q10, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! 
\n" // r10 r11 "vmla.f32 q6, %q10, q2 \n" "vmla.f32 q3, %q11, q0 \n" "pld [%2, #128] \n" "vld1.f32 {d4-d5}, [%2 :128]! \n" // r12 "vmla.f32 q4, %q11, q1 \n" "vmla.f32 q3, %q12, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" // r13 r14 "vmla.f32 q5, %q11, q2 \n" "vmla.f32 q4, %q12, q2 \n" "vmla.f32 q3, %q13, q2 \n" "vmla.f32 q6, %q11, q0 \n" "vmla.f32 q5, %q12, q0 \n" "vmla.f32 q4, %q13, q0 \n" "pld [%2, #128] \n" "vld1.f32 {d4-d5}, [%2 :128] \n" // r15 "vmla.f32 q6, %q12, q1 \n" "vmla.f32 q5, %q13, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21 "vmla.f32 q6, %q13, q2 \n" "vmla.f32 q3, %q14, q0 \n" "pld [%3, #128] \n" "vld1.f32 {d4-d5}, [%3 :128]! \n" // r22 "vmla.f32 q4, %q14, q1 \n" "vmla.f32 q3, %q15, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r23 r24 "vmla.f32 q5, %q14, q2 \n" "vmla.f32 q4, %q15, q2 \n" "vmla.f32 q3, %q16, q2 \n" "vmla.f32 q6, %q14, q0 \n" "vmla.f32 q5, %q15, q0 \n" "vmla.f32 q4, %q16, q0 \n" "pld [%3, #128] \n" "vld1.f32 {d4-d5}, [%3 :128] \n" // r25 "vmla.f32 q6, %q15, q1 \n" "vmla.f32 q5, %q16, q1 \n" "vld1.f32 {d0-d1}, [%0] \n" // sum0 sum1 sum2 sum3 "vmla.f32 q6, %q16, q2 \n" "vadd.f32 d6, d6, d7 \n" "vadd.f32 d8, d8, d9 \n" "vadd.f32 d10, d10, d11 \n" "vadd.f32 d12, d12, d13 \n" "sub %1, %1, #16 \n" "vpadd.f32 d6, d6, d8 \n" "vpadd.f32 d7, d10, d12 \n" "sub %2, %2, #16 \n" "vadd.f32 q0, q0, q3 \n" "sub %3, %3, #16 \n" "vst1.f32 {d0-d1}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1] \n" // r00 r01 r02 r03 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %9.4s, v1.4s \n" "fmul v19.4s, %9.4s, v2.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %11.4s, v4.4s \n" "fmla v19.4s, %11.4s, v5.4s \n" "fmla v16.4s, %12.4s, v5.4s \n" "fmla v17.4s, %12.4s, v6.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3] \n" // r20 r21 r22 r23 "fmla v18.4s, %13.4s, v6.4s \n" "fmla v19.4s, %13.4s, v7.4s \n" "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %15.4s, v1.4s \n" "fmla v19.4s, %15.4s, v2.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "ld1 {v0.2s}, [%0] \n" // sum0 sum1 "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "add %1, %1, #32 \n" "faddp v16.4s, v16.4s, v17.4s \n" "add %2, %2, #32 \n" "faddp v16.4s, v16.4s, v16.4s \n" "add %3, %3, #32 \n" "fadd v0.2s, v0.2s, v16.2s \n" "st1 {v0.2s}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, 
[%1 :128]! \n" // r00 r01 "vmul.f32 q5, %q8, q0 \n" "vmul.f32 q6, %q8, q1 \n" "vmul.f32 q2, %q9, q1 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" // r02 r03 "vmul.f32 q3, %q9, q0 \n" "vmla.f32 q5, %q10, q0 \n" "vmla.f32 q6, %q10, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" // r10 r11 "vmla.f32 q2, %q11, q0 \n" "vmla.f32 q3, %q11, q1 \n" "vmla.f32 q5, %q12, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128] \n" // r12 r13 "vmla.f32 q6, %q12, q0 \n" "vmla.f32 q2, %q13, q0 \n" "vmla.f32 q3, %q13, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21 "vmla.f32 q5, %q14, q0 \n" "vmla.f32 q6, %q14, q1 \n" "vmla.f32 q2, %q15, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128] \n" // r22 r23 "vmla.f32 q3, %q15, q0 \n" "vmla.f32 q5, %q16, q0 \n" "vmla.f32 q6, %q16, q1 \n" "vld1.f32 {d8}, [%0] \n" // sum0 sum1 "vadd.f32 q5, q5, q2 \n" "vadd.f32 q6, q6, q3 \n" "vadd.f32 d10, d10, d11 \n" "vadd.f32 d12, d12, d13 \n" "vpadd.f32 d10, d10, d12 \n" "vadd.f32 d8, d8, d10 \n" "vst1.f32 {d8}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02 "eor v16.16b, v16.16b, v16.16b \n" "ld1 {v16.s}[0], [%0] \n" // sum0 "fmul v17.4s, %8.4s, v0.4s \n" "fmul v18.4s, %9.4s, v1.4s \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %11.4s, v3.4s \n" "fmla v18.4s, %12.4s, v4.4s \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22 "fmla v16.4s, %13.4s, v5.4s \n" "fmla v17.4s, %14.4s, v0.4s \n" "fmla v18.4s, %15.4s, v1.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fadd v17.4s, v17.4s, v18.4s \n" "fadd v16.4s, v16.4s, v17.4s \n" "add %1, %1, #16 \n" "faddp v16.4s, v16.4s, v16.4s \n" "add %2, %2, #16 \n" "faddp v16.2s, v16.2s, v16.2s \n" "add %3, %3, #16 \n" "st1 {v16.s}[0], [%0], #4 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18"); #else // __aarch64__ asm volatile( "pld [%1, #384] \n" "vldm %1, {d0-d5} \n" // r00 r01 r02 "veor q3, q3 \n" "vld1.f32 {d6[0]}, [%0] \n" // sum0 "vmul.f32 q4, %q8, q0 \n" "vmul.f32 q5, %q9, q1 \n" "vmla.f32 q3, %q10, q2 \n" "pld [%2, #384] \n" "vldm %2, {d0-d5} \n" // r10 r11 r12 "vmla.f32 q4, %q11, q0 \n" "vmla.f32 q5, %q12, q1 \n" "vmla.f32 q3, %q13, q2 \n" "pld [%3, #384] \n" "vldm %3, {d0-d5} \n" // r20 r21 
r22 "vmla.f32 q4, %q14, q0 \n" "vmla.f32 q5, %q15, q1 \n" "vmla.f32 q3, %q16, q2 \n" "vadd.f32 q4, q4, q5 \n" "vadd.f32 q3, q3, q4 \n" "add %1, %1, #16 \n" "vadd.f32 d6, d6, d7 \n" "add %2, %2, #16 \n" "vpadd.f32 d6, d6, d6 \n" "add %3, %3, #16 \n" "vst1.f32 {d6[0]}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5"); #endif // __aarch64__ } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } k0 += 9 * 4; } } }
/* ===== begin concatenated file: GB_binop__min_fp32.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__min_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__min_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__min_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_fp32) // A*D function (colscale): GB (_AxD__min_fp32) // D*A function (rowscale): GB (_DxB__min_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__min_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__min_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_fp32) // C=scalar+B GB (_bind1st__min_fp32) // C=scalar+B' GB (_bind1st_tran__min_fp32) // C=A+scalar GB (_bind2nd__min_fp32) // C=A'+scalar GB (_bind2nd_tran__min_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = fminf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = fminf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP32 || GxB_NO_MIN_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// SuiteSparse:GraphBLAS generated kernels specialized for the MIN binary
// operator on float (fp32).  Each function body is an #include'd template
// file; the templates are specialized through macros (GB_DISABLE, GBB, GBX,
// GB_WERK_DECLARE, ...) defined earlier in this generated file — TODO confirm
// against the generator (GB_binop.c/Generator) when editing.

// C += A+B, all 3 matrices dense, accumulating with the MIN operator
// (presumably — name and the noaccum sibling below suggest this; the
// function's header comment lies above this chunk).
void GB (_Cdense_ewise3_accum__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // task slicing of B's entries
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    // this kernel was compiled out; caller falls back to the generic method
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__min_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,         // the scalar b, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns (an artifact
    // of the code generator; kept as generated).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__min_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,         // use M's structure only, not its values
    const bool Mask_comp,           // complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,       // if true, alpha/beta scalars are used
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion: typecast the alpha/beta scalars to float up front
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__min_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,              // if true compute z=f(y,x), not z=f(x,y)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (MIN is commutative, so this branch is the one compiled here.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__min_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__min_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float  x  = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = fminf (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__min_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float  y  = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = fminf (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = fminf (x, aij) ;                  \
}

GrB_Info GB (_bind1st_tran__min_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = fminf (aij, y) ;                  \
}

GrB_Info GB (_bind2nd_tran__min_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the #ifndef GBCOMPACT (or similar) guard opened before this chunk —
// TODO confirm the matching #if, which is outside this view
#endif
residual_based_adjoint_bossak_scheme.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:
//

#if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED)
#define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED

// System includes
#include <vector>
#include <string>
#include <unordered_set>
#include <functional>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/checks.h"
#include "includes/kratos_parameters.h"
#include "solving_strategies/schemes/scheme.h"
#include "response_functions/adjoint_response_function.h"
#include "utilities/variable_utils.h"
#include "utilities/indirect_scalar.h"
#include "utilities/adjoint_extensions.h"

namespace Kratos
{
///@name Kratos Classes
///@{

/// A scheme for dynamic adjoint equations, using Bossak time integration.
/**
 * It can be used for either first- or second-order time derivatives. Elements
 * and conditions must provide a specialization of AdjointExtensions via their
 * data value container, which allows the scheme to operate independently of
 * the variable arrangements in the element or condition.
 */
template <class TSparseSpace, class TDenseSpace>
class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme);

    typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

    typedef typename BaseType::TSystemMatrixType SystemMatrixType;

    typedef typename BaseType::TSystemVectorType SystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /**
     * @param Settings            validated against {"scheme_type","alpha_bossak"};
     *                            only alpha_bossak (default -0.3) is read here.
     * @param pResponseFunction   response function whose gradients drive the
     *                            adjoint right-hand sides.
     */
    ResidualBasedAdjointBossakScheme(Parameters Settings,
                                     AdjointResponseFunction::Pointer pResponseFunction)
        : mpResponseFunction(pResponseFunction)
    {
        Parameters default_parameters(R"({ "scheme_type": "bossak", "alpha_bossak": -0.3 })");
        Settings.ValidateAndAssignDefaults(default_parameters);
        // Remaining Bossak constants are derived per step in
        // InitializeSolutionStep, once the time step size is known.
        mBossak.Alpha = Settings["alpha_bossak"].GetDouble();
    }

    /// Destructor.
    ~ResidualBasedAdjointBossakScheme() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Allocate per-thread scratch storage and reset neighbour counters.
    void Initialize(ModelPart& rModelPart) override
    {
        KRATOS_TRY;
        BaseType::Initialize(rModelPart);
        // Allocate auxiliary memory.
        int num_threads = OpenMPUtils::GetNumThreads();
        mLeftHandSide.resize(num_threads);
        mResponseGradient.resize(num_threads);
        mFirstDerivsLHS.resize(num_threads);
        mFirstDerivsResponseGradient.resize(num_threads);
        mSecondDerivsLHS.resize(num_threads);
        mSecondDerivsResponseGradient.resize(num_threads);
        mAdjointValuesVector.resize(num_threads);
        mAdjointIndirectVector2.resize(num_threads);
        mAdjointIndirectVector3.resize(num_threads);
        mAuxAdjointIndirectVector1.resize(num_threads);
        InitializeNodeNeighbourCount(rModelPart.Nodes());
        KRATOS_CATCH("");
    }

    /// Recompute the Bossak constants for the current step's time step size
    /// and refresh the per-node neighbour-element counts.
    void InitializeSolutionStep(ModelPart& rModelPart,
                                SystemMatrixType& rA,
                                SystemVectorType& rDx,
                                SystemVectorType& rb) override
    {
        KRATOS_TRY;
        BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
        const auto& r_current_process_info = rModelPart.GetProcessInfo();
        mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info));
        this->CalculateNodeNeighbourCount(rModelPart);
        KRATOS_CATCH("");
    }

    /// Update the auxiliary adjoint variable after the step is solved.
    void FinalizeSolutionStep(ModelPart& rModelPart,
                              SystemMatrixType& rA,
                              SystemVectorType& rDx,
                              SystemVectorType& rb) override
    {
        KRATOS_TRY;
        BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
        this->UpdateAuxiliaryVariable(rModelPart);
        KRATOS_CATCH("");
    }

    /// Apply the solved increment to the adjoint dofs and update the
    /// time-integration adjoint variables.
    void Update(ModelPart& rModelPart,
                DofsArrayType& rDofSet,
                SystemMatrixType& rA,
                SystemVectorType& rDx,
                SystemVectorType& rb) override
    {
        KRATOS_TRY;
        // Update degrees of freedom: adjoint variables associated to the
        // residual of the physical problem.
        this->mpDofUpdater->UpdateDofs(rDofSet, rDx);
        // Update adjoint variables associated to time integration.
        this->UpdateTimeSchemeAdjoints(rModelPart);
        KRATOS_CATCH("");
    }

    /// Assemble the element's local LHS/RHS for the adjoint system: the
    /// transposed residual gradients (0th/1st/2nd derivatives), the previous
    /// time-step terms, and minus the current residual contribution.
    void CalculateSystemContributions(Element::Pointer pCurrentElement,
                                      LocalSystemMatrixType& rLHS_Contribution,
                                      LocalSystemVectorType& rRHS_Contribution,
                                      Element::EquationIdVectorType& rEquationId,
                                      ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        auto& r_current_element = *pCurrentElement;
        const auto k = OpenMPUtils::ThisThread();
        r_current_element.GetValuesVector(mAdjointValuesVector[k]);
        const auto local_size = mAdjointValuesVector[k].size();
        if (rRHS_Contribution.size() != local_size)
        {
            rRHS_Contribution.resize(local_size, false);
        }
        if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size)
        {
            rLHS_Contribution.resize(local_size, local_size, false);
        }
        this->CheckAndResizeThreadStorage(local_size);
        // Order matters: the gradient term initializes (=) the local system,
        // the remaining calls accumulate (+=/-=) into it.
        this->CalculateGradientContributions(r_current_element, rLHS_Contribution,
                                             rRHS_Contribution, rCurrentProcessInfo);
        this->CalculateFirstDerivativeContributions(
            r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
        this->CalculateSecondDerivativeContributions(
            r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
        this->CalculatePreviousTimeStepContributions(
            r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
        this->CalculateResidualLocalContributions(
            r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
        r_current_element.EquationIdVector(rEquationId, rCurrentProcessInfo);
        KRATOS_CATCH("");
    }

    /// LHS-only interface; delegates to CalculateSystemContributions with a
    /// throw-away RHS vector.
    void Calculate_LHS_Contribution(Element::Pointer pCurrentElement,
                                    LocalSystemMatrixType& rLHS_Contribution,
                                    Element::EquationIdVectorType& rEquationId,
                                    ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        LocalSystemVectorType RHS_Contribution;
        CalculateSystemContributions(pCurrentElement, rLHS_Contribution,
                                     RHS_Contribution, rEquationId, rCurrentProcessInfo);
        KRATOS_CATCH("");
    }

    /// Condition contribution: forwards to the condition's own local system.
    void Condition_CalculateSystemContributions(Condition::Pointer pCurrentCondition,
                                                LocalSystemMatrixType& rLHS_Contribution,
                                                LocalSystemVectorType& rRHS_Contribution,
                                                Condition::EquationIdVectorType& rEquationId,
                                                ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        // NOT TESTED !!!
        pCurrentCondition->CalculateLocalSystem(rLHS_Contribution, rRHS_Contribution,
                                                rCurrentProcessInfo);
        KRATOS_CATCH("");
    }

    void Condition_Calculate_LHS_Contribution(Condition::Pointer pCurrentCondition,
                                              LocalSystemMatrixType& rLHS_Contribution,
                                              Condition::EquationIdVectorType& rEquationId,
                                              ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        LocalSystemVectorType RHS_Contribution;
        Condition_CalculateSystemContributions(pCurrentCondition, rLHS_Contribution,
                                               RHS_Contribution, rEquationId,
                                               rCurrentProcessInfo);
        KRATOS_CATCH("");
    }

    /// Release the dof updater's internal storage.
    void Clear() override
    {
        this->mpDofUpdater->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedAdjointBossakScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    // Precomputed Bossak time-integration constants; Alpha is user supplied,
    // the rest are derived from Alpha and the time step size in
    // CalculateBossakConstants.
    struct BossakConstants
    {
        double Alpha;
        double Beta;
        double Gamma;
        double C0;
        double C1;
        double C2;
        double C3;
        double C4;
        double C5;
        double C6;
        double C7;
    };

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    BossakConstants mBossak;

    typename TSparseSpace::DofUpdaterPointerType mpDofUpdater =
        TSparseSpace::CreateDofUpdater();

    AdjointResponseFunction::Pointer mpResponseFunction;

    // One entry per OpenMP thread (indexed by OpenMPUtils::ThisThread()).
    std::vector<LocalSystemMatrixType> mLeftHandSide;
    std::vector<LocalSystemVectorType> mResponseGradient;
    std::vector<LocalSystemMatrixType> mFirstDerivsLHS;
    std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient;
    std::vector<LocalSystemMatrixType> mSecondDerivsLHS;
    std::vector<LocalSystemVectorType> mSecondDerivsResponseGradient;
    std::vector<LocalSystemVectorType> mAdjointValuesVector;
    std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2;
    std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3;
    std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1;

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /// Initializes (=) the local system with the transposed residual gradient
    /// and minus the response gradient.
    void CalculateGradientContributions(Element& rCurrentElement,
                                        LocalSystemMatrixType& rLHS_Contribution,
                                        LocalSystemVectorType& rRHS_Contribution,
                                        ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        rCurrentElement.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo);
        this->mpResponseFunction->CalculateGradient(
            rCurrentElement, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo);
        noalias(rLHS_Contribution) = mLeftHandSide[k];
        noalias(rRHS_Contribution) = -1. * mResponseGradient[k];
    }

    /// Accumulates the first-derivative (damping-like) terms, scaled by C6.
    void CalculateFirstDerivativeContributions(Element& rCurrentElement,
                                               LocalSystemMatrixType& rLHS_Contribution,
                                               LocalSystemVectorType& rRHS_Contribution,
                                               ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        rCurrentElement.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo);
        mpResponseFunction->CalculateFirstDerivativesGradient(
            rCurrentElement, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k],
            rCurrentProcessInfo);
        noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k];
        noalias(rRHS_Contribution) -= mBossak.C6 * mFirstDerivsResponseGradient[k];
    }

    /// Accumulates the second-derivative (mass-like) terms; the LHS is
    /// pre-scaled by (1 - Alpha) per the Bossak scheme, then weighted by C7.
    void CalculateSecondDerivativeContributions(Element& rCurrentElement,
                                                LocalSystemMatrixType& rLHS_Contribution,
                                                LocalSystemVectorType& rRHS_Contribution,
                                                ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        auto& r_response_function = *(this->mpResponseFunction);
        rCurrentElement.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo);
        mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
        r_response_function.CalculateSecondDerivativesGradient(
            rCurrentElement, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k],
            rCurrentProcessInfo);
        noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k];
        noalias(rRHS_Contribution) -= mBossak.C7 * mSecondDerivsResponseGradient[k];
    }

    /// Adds the old (previous adjoint step, index 1) nodal adjoint values to
    /// the RHS.  Each nodal contribution is divided by the node's neighbour
    /// count so the elementwise assembly sums to the full nodal value.
    void CalculatePreviousTimeStepContributions(Element& rCurrentElement,
                                                LocalSystemMatrixType& rLHS_Contribution,
                                                LocalSystemVectorType& rRHS_Contribution,
                                                ProcessInfo& rCurrentProcessInfo)
    {
        const auto& r_geometry = rCurrentElement.GetGeometry();
        const auto k = OpenMPUtils::ThisThread();
        auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS);
        unsigned local_index = 0;
        for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
        {
            auto& r_node = r_geometry[i_node];
            r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1);
            r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1);
            r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
            const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
            for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
            {
                rRHS_Contribution[local_index] +=
                    weight * (mBossak.C7 * mAuxAdjointIndirectVector1[k][d] +
                              mBossak.C4 * mAdjointIndirectVector2[k][d] +
                              mBossak.C5 * mAdjointIndirectVector3[k][d]);
                ++local_index;
            }
        }
    }

    /// Moves the LHS * (current adjoint) product to the RHS so the linear
    /// solve computes the adjoint increment.
    void CalculateResidualLocalContributions(Element& rCurrentElement,
                                             LocalSystemMatrixType& rLHS_Contribution,
                                             LocalSystemVectorType& rRHS_Contribution,
                                             ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        auto& r_residual_adjoint = mAdjointValuesVector[k];
        rCurrentElement.GetValuesVector(r_residual_adjoint);
        noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint);
    }

    /// Ensure NUMBER_OF_NEIGHBOUR_ELEMENTS exists on every node before any
    /// concurrent access.
    void InitializeNodeNeighbourCount(ModelPart::NodesContainerType& rNodes)
    {
        // This loop should not be omp parallel
        // The operation is not threadsafe if the value is uninitialized
        for (auto& r_node : rNodes)
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
    }

    /// Count, for each node, how many elements share it (assembled across
    /// MPI partitions); used as the 1/weight in nodal assembly.
    void CalculateNodeNeighbourCount(ModelPart& rModelPart)
    {
        // Calculate number of neighbour elements for each node.
        const int num_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
        for (int i = 0; i < num_nodes; ++i)
        {
            Node<3>& r_node = *(rModelPart.Nodes().begin() + i);
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
        }
        const int num_elements = rModelPart.NumberOfElements();
#pragma omp parallel for
        for (int i = 0; i < num_elements; ++i)
        {
            Element& r_element = *(rModelPart.Elements().begin() + i);
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j)
            {
                double& r_num_neighbour =
                    r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
#pragma omp atomic
                r_num_neighbour += 1.0;
            }
        }
        rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);
    }

    /// Recompute the nodal time-scheme adjoint variables (lambda2 / lambda3)
    /// from the freshly solved residual adjoints and the previous step's
    /// values, assembling element contributions under per-node locks.
    void UpdateTimeSchemeAdjoints(ModelPart& rModelPart)
    {
        KRATOS_TRY;
        auto lambda2_vars = GatherVariables(
            rModelPart.Elements(),
            [](const AdjointExtensions& rExtensions,
               std::vector<const VariableData*>& rVec) {
                rExtensions.GetFirstDerivativesVariables(rVec);
            });
        auto lambda3_vars = GatherVariables(
            rModelPart.Elements(),
            [](const AdjointExtensions& rExtensions,
               std::vector<const VariableData*>& rVec) {
                return rExtensions.GetSecondDerivativesVariables(rVec);
            });
        SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes());
        SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes());
        const int number_of_elements = rModelPart.NumberOfElements();
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        Vector adjoint2_aux, adjoint3_aux;
        std::vector<IndirectScalar<double>> adjoint2_old, adjoint3_old;
#pragma omp parallel for private(adjoint2_aux, adjoint3_aux, adjoint2_old, adjoint3_old)
        for (int i = 0; i < number_of_elements; ++i)
        {
            Element& r_element = *(rModelPart.ElementsBegin() + i);
            const int k = OpenMPUtils::ThisThread();
            r_element.GetValuesVector(mAdjointValuesVector[k]);
            this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
            r_element.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], r_process_info);
            this->mpResponseFunction->CalculateFirstDerivativesGradient(
                r_element, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k],
                r_process_info);
            r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
            mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
            this->mpResponseFunction->CalculateSecondDerivativesGradient(
                r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k],
                r_process_info);
            if (adjoint2_aux.size() != mFirstDerivsResponseGradient[k].size())
                adjoint2_aux.resize(mFirstDerivsResponseGradient[k].size(), false);
            noalias(adjoint2_aux) = -mFirstDerivsResponseGradient[k] -
                                    prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]);
            if (adjoint3_aux.size() != mSecondDerivsResponseGradient[k].size())
                adjoint3_aux.resize(mSecondDerivsResponseGradient[k].size(), false);
            noalias(adjoint3_aux) = -mSecondDerivsResponseGradient[k] -
                                    prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]);
            auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
            // Assemble the contributions to the corresponding nodal unknowns.
            unsigned local_index = 0;
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
            {
                // index 0 = current step values (being assembled),
                // index 1 = previous adjoint step values (read-only here)
                r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 0);
                r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 0);
                r_extensions.GetFirstDerivativesVector(i_node, adjoint2_old, 1);
                r_extensions.GetSecondDerivativesVector(i_node, adjoint3_old, 1);
                r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
                Node<3>& r_node = r_geometry[i_node];
                const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
                r_node.SetLock();
                for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
                {
                    mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index];
                    mAdjointIndirectVector2[k][d] += mBossak.C0 * weight * adjoint2_old[d];
                    mAdjointIndirectVector2[k][d] += mBossak.C1 * weight * adjoint3_old[d];
                    mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index];
                    mAdjointIndirectVector3[k][d] += mBossak.C2 * weight * adjoint2_old[d];
                    mAdjointIndirectVector3[k][d] += mBossak.C3 * weight * adjoint3_old[d];
                    mAdjointIndirectVector3[k][d] += weight * mAuxAdjointIndirectVector1[k][d];
                    ++local_index;
                }
                r_node.UnSetLock();
            }
        }
        // Finalize global assembly
        Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator());
        Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator());
        KRATOS_CATCH("");
    }

    /// Recompute the auxiliary adjoint variable (the Alpha-weighted
    /// second-derivative term carried to the next adjoint step).
    void UpdateAuxiliaryVariable(ModelPart& rModelPart)
    {
        KRATOS_TRY;
        auto aux_vars = GatherVariables(
            rModelPart.Elements(),
            [](const AdjointExtensions& rExtensions,
               std::vector<const VariableData*>& rOut) {
                return rExtensions.GetAuxiliaryVariables(rOut);
            });
        SetToZero_AdjointVars(aux_vars, rModelPart.Nodes());
        // Loop over elements to assemble the remaining terms
        const int number_of_elements = rModelPart.NumberOfElements();
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        Vector aux_adjoint_vector;
#pragma omp parallel for private(aux_adjoint_vector)
        for (int i = 0; i < number_of_elements; ++i)
        {
            Element& r_element = *(rModelPart.ElementsBegin() + i);
            const int k = OpenMPUtils::ThisThread();
            r_element.GetValuesVector(mAdjointValuesVector[k]);
            this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
            r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
            // Here the mass-like LHS is scaled by Alpha (the complementary
            // factor to the (1 - Alpha) used in the system assembly).
            mSecondDerivsLHS[k] *= mBossak.Alpha;
            this->mpResponseFunction->CalculateSecondDerivativesGradient(
                r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k],
                r_process_info);
            if (aux_adjoint_vector.size() != mSecondDerivsLHS[k].size1())
                aux_adjoint_vector.resize(mSecondDerivsLHS[k].size1(), false);
            noalias(aux_adjoint_vector) =
                prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) +
                mSecondDerivsResponseGradient[k];
            auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
            // Assemble the contributions to the corresponding nodal unknowns.
            unsigned local_index = 0;
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
            {
                Node<3>& r_node = r_geometry[i_node];
                r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0);
                r_node.SetLock();
                for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d)
                {
                    mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index];
                    ++local_index;
                }
                r_node.UnSetLock();
            }
        }
        // Finalize global assembly
        Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator());
        KRATOS_CATCH("");
    }

    /// Resize this thread's scratch matrices/vectors to SystemSize if needed.
    void CheckAndResizeThreadStorage(unsigned SystemSize)
    {
        const int k = OpenMPUtils::ThisThread();
        if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize)
        {
            mLeftHandSide[k].resize(SystemSize, SystemSize, false);
        }
        if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize)
        {
            mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false);
        }
        if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize)
        {
            mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false);
        }
        if (mResponseGradient[k].size() != SystemSize)
        {
            mResponseGradient[k].resize(SystemSize, false);
        }
        if (mFirstDerivsResponseGradient[k].size() != SystemSize)
        {
            mFirstDerivsResponseGradient[k].resize(SystemSize, false);
        }
        if (mSecondDerivsResponseGradient[k].size() != SystemSize)
        {
            mSecondDerivsResponseGradient[k].resize(SystemSize, false);
        }
    }

    /// Derive the full set of Bossak constants from Alpha and the step size.
    static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime)
    {
        BossakConstants bc;
        bc.Alpha = Alpha;
        bc.Beta = 0.25 * (1.0 - bc.Alpha) * (1.0 - bc.Alpha);
        bc.Gamma = 0.5 - bc.Alpha;
        bc.C0 = 1.0 - bc.Gamma / bc.Beta;
        bc.C1 = -1.0 / (bc.Beta * DeltaTime);
        bc.C2 = (1.0 - 0.5 * bc.Gamma / bc.Beta) * DeltaTime;
        bc.C3 = (1.0 - 0.5 / bc.Beta);
        bc.C4 = (bc.Beta - bc.Gamma * (bc.Gamma + 0.5)) / (DeltaTime * bc.Beta * bc.Beta);
        bc.C5 = -1.0 * (bc.Gamma + 0.5) / (DeltaTime * DeltaTime * bc.Beta * bc.Beta);
        bc.C6 = bc.Gamma / (bc.Beta * DeltaTime);
        bc.C7 = 1.0 / (DeltaTime * DeltaTime * bc.Beta);
        return bc;
    }

    /// Positive forward-time step size; throws if TIME is not decreasing
    /// (the adjoint problem is solved backwards in time).
    static double GetTimeStep(const ProcessInfo& rCurrentProcessInfo)
    {
        const ProcessInfo& r_last_process_info =
            rCurrentProcessInfo.GetPreviousSolutionStepInfo(1);
        // Note: solution is backwards in time, but we still want a positive
        // time step
        // (it is the time step in the "forward" Bossak scheme).
        double time_step =
            r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME);
        KRATOS_ERROR_IF(time_step <= 0.0)
            << "Backwards in time solution is not decreasing time from last "
               "step."
            << std::endl;
        return time_step;
    }

    // Hash/equality functors so VariableData pointers can live in an
    // unordered_set keyed by variable identity (Key/value equality).
    struct Hash
    {
        std::size_t operator()(const VariableData* const& p) const
        {
            return p->Key();
        }
    };

    struct Pred
    {
        bool operator()(const VariableData* const l, const VariableData* const r) const
        {
            return *l == *r;
        }
    };

    // Gathers variables needed for assembly.
    /// Collects the union (deduplicated) of the variables reported by each
    /// element's AdjointExtensions via the supplied accessor.
    static std::vector<const VariableData*> GatherVariables(
        const ModelPart::ElementsContainerType& rElements,
        std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars)
    {
        KRATOS_TRY;
        const int num_threads = OpenMPUtils::GetNumThreads();
        std::vector<const VariableData*> local_vars;
        std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads);
#pragma omp parallel for private(local_vars)
        for (int i = 0; i < static_cast<int>(rElements.size()); ++i)
        {
            auto& r_element = *(rElements.begin() + i);
            GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars);
            const int k = OpenMPUtils::ThisThread();
            thread_vars[k].insert(local_vars.begin(), local_vars.end());
        }
        std::unordered_set<const VariableData*, Hash, Pred> all_vars;
        for (int i = 0; i < num_threads; ++i)
        {
            all_vars.insert(thread_vars[i].begin(), thread_vars[i].end());
        }
        return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()};
        KRATOS_CATCH("");
    }

    /// Zero the given nodal variables (double or 3-component array) on all
    /// nodes; throws for unknown variable names.
    static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables,
                                      ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY;
        for (auto p_variable_data : rVariables)
        {
            if (KratosComponents<Variable<array_1d<double, 3>>>::Has(p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<array_1d<double, 3>>>::Get(p_variable_data->Name());
                VariableUtils().SetToZero_VectorVar(r_variable, rNodes);
            }
            else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<double>>::Get(p_variable_data->Name());
                VariableUtils().SetToZero_ScalarVar(r_variable, rNodes);
            }
            else
            {
                KRATOS_ERROR << "Variable \"" << p_variable_data->Name() << "\" not found!\n";
            }
        }
        KRATOS_CATCH("");
    }

    /// Sum the given nodal variables across MPI partitions.
    static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables,
                                     Communicator& rComm)
    {
        KRATOS_TRY;
        for (auto p_variable_data : rVariables)
        {
            if (KratosComponents<Variable<array_1d<double, 3>>>::Has(p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<array_1d<double, 3>>>::Get(p_variable_data->Name());
                rComm.AssembleCurrentData(r_variable);
            }
            else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<double>>::Get(p_variable_data->Name());
                rComm.AssembleCurrentData(r_variable);
            }
            else
            {
                KRATOS_ERROR << "Variable \"" << p_variable_data->Name() << "\" not found!\n";
            }
        }
        KRATOS_CATCH("");
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; /* Class ResidualBasedAdjointBossakScheme */

///@}

///@name Type Definitions
///@{

///@}

} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
ttask.c
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <omp.h> /* OpenMP */

/* Shared accumulator written by all tasks; updates are protected with
 * "omp atomic".  (volatile is not a substitute for synchronization — the
 * atomic construct is what makes the updates safe here.) */
volatile long result=0;

/*
 * Demonstrates OpenMP task data-sharing semantics:
 *   - task 1: firstprivate captures argum == 1, the loop body adds it ten
 *     times                                  -> result += 10
 *   - loop tasks: argum is incremented BEFORE each task is created, so the
 *     tasks capture the values 2..11         -> result += 2+3+...+11 = 65
 *   - taskwait guarantees all of the above finished, so the final task
 *     observes the deterministic value result == 75.
 */
void foo()
{
#pragma omp parallel
#pragma omp single
    {
        int argum = 1;
        /* First task: the entire for-loop is the task body; argum is a
         * private copy initialized to 1 at task creation. */
#pragma omp task shared(result) firstprivate(argum)
        for (long i = 0; i < 10; i++)
        {
#pragma omp atomic
            result += argum;
        }
        /* One task per iteration; each captures the post-increment value
         * of argum (2, 3, ..., 11). */
        for (long i = 0; i < 10; i++)
        {
            argum++;
#pragma omp task shared(result) firstprivate(argum)
#pragma omp atomic
            result += argum;
        }
        /* Wait for all tasks generated so far before reading result. */
#pragma omp taskwait
#pragma omp task shared(result) firstprivate(argum)
        printf("Hello from third task, up to now result=%ld and argum = %d\n",
               result, argum);
    }
}

/* Driver: runs foo() and reports the accumulated result (75). */
int main(int argc, char *argv[])
{
    foo();
    printf("Back in main ... result = %ld\n", result);
}
matmul.c
#include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
//#define _OPENACCM
#include <openacc.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#ifndef _N_
#define _N_ 512
#endif

#ifndef VERIFICATION
#define VERIFICATION 1
#endif

/* Matrix shapes: a is MxN (result), b is MxP, c is PxN. Square by default. */
int N = _N_;
int M = _N_;
int P = _N_;

/* Wall-clock time in seconds. */
double my_timer ()
{
    struct timeval time;
    gettimeofday (&time, 0);
    return time.tv_sec + time.tv_usec / 1000000.0;
}

/* a = b * c on the accelerator (OpenACC kernels). */
void MatrixMultiplication_openacc(float * a, float * b, float * c)
{
    int i, j, k ;
    float cSum;
#ifdef _OPENACCM
    acc_init(acc_device_default);
#endif
#pragma acc data copyout(a[0:(M*N)]), copyin(b[0:(M*P)],c[0:(P*N)])
    {
#pragma acc kernels loop independent gang
        for (i=0; i<M; i++){
#pragma acc loop worker
            for (j=0; j<N; j++) {
                float sum = 0.0 ;
#pragma acc loop seq
                for (k=0; k<P; k++) {
                    sum += b[i*P+k]*c[k*N+j] ;
                }
                a[i*N+j] = sum ;
            }
        }
    }
    /* Fake computation to measure timing of unified memory version.
     * BUG FIX: walk the full MxN result; the original used N for both loop
     * bounds, which was only correct because M == N by default. */
    cSum = 0.0;
    for( i=0; i<M; i++ )
        for( j=0; j<N; j++ )
            cSum += a[i*N+j];
    printf("Sum of GPU_A: %lf\n", cSum);
#ifdef _OPENACCM
    acc_shutdown(acc_device_default);
#endif
}

/* Reference CPU version of a = b * c using OpenMP worksharing. */
void MatrixMultiplication_openmp(float * a,float * b, float * c)
{
    int i, j, k ;
    int chunk = N/4;

#pragma omp parallel shared(a,b,c,chunk) private(i,j,k)
    {
#ifdef _OPENMP
        if(omp_get_thread_num() == 0) {
            printf("Number of OpenMP threads %d\n", omp_get_num_threads());
        }
#endif
#pragma omp for
        for (i=0; i<M; i++){
            for (j=0; j<N; j++) {
                float sum = 0.0 ;
                for (k=0; k<P; k++)
                    sum += b[i*P+k]*c[k*N+j] ;
                a[i*N+j] = sum ;
            }
        }
    }
}

int main()
{
    float *a, *b, *c;
    float *a_CPU, *b_CPU, *c_CPU;
    int i,j;
    double elapsed_time;

    /* Unified-memory buffers for the accelerator, plain heap for the CPU. */
    a = (float *) acc_create_unified(NULL, M*N*sizeof(float));
    b = (float *) acc_create_unified(NULL, M*P*sizeof(float));
    c = (float *) acc_create_unified(NULL, P*N*sizeof(float));
    a_CPU = (float *) malloc(M*N*sizeof(float));
    b_CPU = (float *) malloc(M*P*sizeof(float));
    c_CPU = (float *) malloc(P*N*sizeof(float));

    for (i = 0; i < M*N; i++) {
        a[i] = (float) 0.0F;
        a_CPU[i] = (float) 0.0F;
    }
    for (i = 0; i < M*P; i++) {
        b[i] = (float) i;
        b_CPU[i] = (float) i;
    }
    for (i = 0; i < P*N; i++) {
        c[i] = (float) 1.0F;
        c_CPU[i] = (float) 1.0F;
    }

    elapsed_time = my_timer();
    MatrixMultiplication_openmp(a_CPU,b_CPU,c_CPU);
    elapsed_time = my_timer() - elapsed_time;
    printf("CPU Elapsed time = %lf sec\n", elapsed_time);

    elapsed_time = my_timer();
    MatrixMultiplication_openacc(a,b,c);
    elapsed_time = my_timer() - elapsed_time;
    printf("Accelerator Elapsed time = %lf sec\n", elapsed_time);

#if VERIFICATION == 1
    {
        /* Compare L2 norms of the CPU and accelerator results. */
        double cpu_sum = 0.0;
        double gpu_sum = 0.0;
        double rel_err = 0.0;

        for (i=0; i<M*N; i++){
            cpu_sum += a_CPU[i]*a_CPU[i];
            gpu_sum += a[i]*a[i];
        }

        cpu_sum = sqrt(cpu_sum);
        gpu_sum = sqrt(gpu_sum);
        if( cpu_sum > gpu_sum ) {
            rel_err = (cpu_sum-gpu_sum)/cpu_sum;
        } else {
            rel_err = (gpu_sum-cpu_sum)/cpu_sum;
        }

        if(rel_err < 1e-6) {
            printf("Verification Successful err = %e\n", rel_err);
        } else {
            printf("Verification Fail err = %e\n", rel_err);
        }
    }
#endif

    free(a_CPU);
    free(b_CPU);
    free(c_CPU);
    /* BUG FIX: release b and c with the sizes they were created with
     * (M*P and P*N); the original passed M*N for both, which only matched
     * by accident because M == N == P by default. */
    acc_delete_unified(a, M*N*sizeof(float));
    acc_delete_unified(b, M*P*sizeof(float));
    acc_delete_unified(c, P*N*sizeof(float));
    return 0;
}
StochasticLutN.h
// -------------------------------------------------------------------------- // Binary Brain -- binary neural net framework // // Copyright (C) 2018 by Ryuji Fuchikami // https://github.com/ryuz // ryuji.fuchikami@nifty.com // -------------------------------------------------------------------------- #pragma once #include <algorithm> #include <array> #include <vector> #include "bb/StochasticLutModel.h" #include "bb/FixedSizeConnectionTable.h" #include "bb/StochasticOperation.h" #include "bb/StochasticLutSimd.h" namespace bb { // 確率的LUTの抽象レイヤー template <int N = 6, typename BinType = float, typename RealType = float> class StochasticLutN : public StochasticLutModel { using _super = StochasticLutModel; static int const NN = (1 << N); public: static inline std::string ModelName(void) { return "StochasticLut" + std::to_string(N); } static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); } std::string GetModelName(void) const override { return ModelName(); } std::string GetObjectName(void) const override { return ObjectName(); } protected: bool m_host_only = false; bool m_host_simd = true; bool m_binary_mode = (DataType<BinType>::type == BB_TYPE_BIT); bool m_lut_binarize = true; bool m_y_binarize = false; index_t m_max_tmp_mem_size = 256 * 1024 * 1024; std::string m_connection; RealType m_unbinarize_bias = (RealType)0.25; indices_t m_input_shape; indices_t m_output_shape; FrameBuffer m_x_buf; FixedSizeConnectionTable<N> m_connection_table; std::shared_ptr<Tensor> m_W; std::shared_ptr<Tensor> m_dW; std::mt19937_64 m_mt; public: struct create_t { indices_t output_shape; //< 出力形状 std::string connection; //< 結線ルール std::uint64_t seed = 1; //< 乱数シード }; protected: StochasticLutN(create_t const &create) { BB_ASSERT(!create.output_shape.empty()); m_output_shape = create.output_shape; m_connection = create.connection; m_mt.seed(create.seed); m_W = std::make_shared<Tensor>(); m_dW = 
std::make_shared<Tensor>(); } void CommandProc(std::vector<std::string> args) { // バイナリモード設定 if (DataType<BinType>::type != BB_TYPE_BIT) { if ( args.size() == 2 && args[0] == "binary") { m_binary_mode = EvalBool(args[1]); } } // LUTバイナライズ設定 if ( args.size() == 2 && args[0] == "lut_binarize" ) { m_lut_binarize = EvalBool(args[1]); } // Y出力バイナライズ設定 if ( args.size() == 2 && args[0] == "y_binarize" ) { m_y_binarize = EvalBool(args[1]); } // HostOnlyモード設定 if (args.size() == 2 && args[0] == "host_only") { m_host_only = EvalBool(args[1]); } // Host SIMDモード設定 if (args.size() == 2 && args[0] == "host_simd") { m_host_simd = EvalBool(args[1]); } } public: ~StochasticLutN() {} static std::shared_ptr<StochasticLutN> Create(create_t const &create) { return std::shared_ptr<StochasticLutN>(new StochasticLutN(create)); } static std::shared_ptr<StochasticLutN> Create(indices_t const &output_shape, std::string connection = "", std::uint64_t seed = 1) { create_t create; create.output_shape = output_shape; create.connection = connection; create.seed = seed; return Create(create); } static std::shared_ptr<StochasticLutN> Create(index_t output_node_size, std::string connection = "", std::uint64_t seed = 1) { create_t create; create.output_shape.resize(1); create.output_shape[0] = output_node_size; create.connection = connection; create.seed = seed; return Create(create); } static std::shared_ptr<StochasticLutN> Create(void) { return Create(create_t()); } #ifdef BB_PYBIND11 static std::shared_ptr<StochasticLutN> CreatePy(indices_t const &output_shape, std::string connection = "", std::uint64_t seed = 1) { create_t create; create.output_shape = output_shape; create.connection = connection; create.seed = seed; return Create(create); } #endif // シリアライズ protected: void DumpObjectData(std::ostream &os) const override { // バージョン std::int64_t ver = 1; bb::SaveValue(os, ver); // 親クラス _super::DumpObjectData(os); // メンバ bb::SaveValue(os, m_host_only); bb::SaveValue(os, m_host_simd); 
bb::SaveValue(os, m_binary_mode); bb::SaveValue(os, m_lut_binarize); bb::SaveValue(os, m_y_binarize); bb::SaveValue(os, m_max_tmp_mem_size); bb::SaveValue(os, m_connection); bb::SaveValue(os, m_unbinarize_bias); bb::SaveValue(os, m_input_shape); bb::SaveValue(os, m_output_shape); m_connection_table.DumpObject(os); m_W->DumpObject(os); } void LoadObjectData(std::istream &is) override { // バージョン std::int64_t ver; bb::LoadValue(is, ver); BB_ASSERT(ver == 1); // 親クラス _super::LoadObjectData(is); // メンバ bb::LoadValue(is, m_host_only); bb::LoadValue(is, m_host_simd); bb::LoadValue(is, m_binary_mode); bb::LoadValue(is, m_lut_binarize); bb::LoadValue(is, m_y_binarize); bb::LoadValue(is, m_max_tmp_mem_size); bb::LoadValue(is, m_connection); bb::LoadValue(is, m_unbinarize_bias); bb::LoadValue(is, m_input_shape); bb::LoadValue(is, m_output_shape); m_connection_table.LoadObject(is); m_W->LoadObject(is); // 再構築 m_dW->Resize({CalcShapeSize(m_output_shape), NN}, DataType<RealType>::type); m_dW->FillZero(); } public: // Serialize(旧) void Save(std::ostream &os) const override { SaveIndices(os, m_input_shape); SaveIndices(os, m_output_shape); m_connection_table.Save(os); m_W->Save(os); } void Load(std::istream &is) override { m_input_shape = LoadIndices(is); m_output_shape = LoadIndices(is); m_connection_table.Load(is); m_W->Load(is); } #ifdef BB_WITH_CEREAL template <class Archive> void save(Archive& archive, std::uint32_t const version) const { _super::save(archive, version); archive(cereal::make_nvp("input_shape", m_input_shape)); archive(cereal::make_nvp("output_shape", m_output_shape)); archive(cereal::make_nvp("connection_table", m_connection_table)); archive(cereal::make_nvp("W", *m_W)); } template <class Archive> void load(Archive& archive, std::uint32_t const version) { _super::load(archive, version); archive(cereal::make_nvp("input_shape", m_input_shape)); archive(cereal::make_nvp("output_shape", m_output_shape)); archive(cereal::make_nvp("connection_table", 
m_connection_table)); archive(cereal::make_nvp("W", *m_W)); } void Save(cereal::JSONOutputArchive& archive) const { archive(cereal::make_nvp("StochasticLutN", *this)); } void Load(cereal::JSONInputArchive& archive) { archive(cereal::make_nvp("StochasticLutN", *this)); } #endif Tensor &W(void) { return *m_W; } Tensor const &W(void) const { return *m_W; } Tensor &dW(void) { return *m_dW; } Tensor const &dW(void) const { return *m_dW; } auto lock_W(void) { return m_W->Lock<RealType>(); } auto lock_W_const(void) const { return m_W->LockConst<RealType>(); } auto lock_dW(void) { return m_dW->Lock<RealType>(); } auto lock_dW_const(void) const { return m_dW->LockConst<RealType>(); } // 接続管理 index_t GetNodeConnectionSize(index_t node) const override { return m_connection_table.GetInputConnectionSize(node); } void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node) override { m_connection_table.SetInputConnection(node, input_index, input_node); } index_t GetNodeConnectionIndex(index_t node, index_t input_index) const override { return m_connection_table.GetInputConnection(node, input_index); } /** * @brief 入力のshape設定 * @detail 入力のshape設定 * @param shape 新しいshape * @return なし */ indices_t SetInputShape(indices_t shape) override { // 設定済みなら何もしない if ( shape == this->GetInputShape() ) { return this->GetOutputShape(); } // 形状設定 m_input_shape = shape; // 接続初期化 m_connection_table.SetShape(m_input_shape, m_output_shape); m_connection_table.InitializeConnection(m_mt(), m_connection); // auto output_node_size = CalcShapeSize(m_output_shape); // m_input_index.Resize(output_node_size, N); // this->InitializeNodeInput(m_mt(), m_connection); // パラメータ初期化(結局初期値は何が良いのかまだよくわからない) // m_W->Resize({m_output_node_size, NN}, DataType<RealType>::type); m_W->InitUniformDistribution(0.4, 0.6, m_mt()); // m_W->Resize({m_output_node_size, NN}, DataType<RealType>::type); m_W->InitUniformDistribution(0.0, 1.0, m_mt()); // m_W->Resize({m_output_node_size, NN}, 
DataType<RealType>::type); m_W->InitNormalDistribution(0.5, 0.001, m_mt()); m_W->Resize({CalcShapeSize(m_output_shape), NN}, DataType<RealType>::type); m_W->InitNormalDistribution(0.5, 0.01, m_mt()); m_dW->Resize({CalcShapeSize(m_output_shape), NN}, DataType<RealType>::type); m_dW->FillZero(); return m_output_shape; } /** * @brief 入力形状取得 * @detail 入力形状を取得する * @return 入力形状を返す */ indices_t GetInputShape(void) const override { return m_input_shape; } /** * @brief 出力形状取得 * @detail 出力形状を取得する * @return 出力形状を返す */ indices_t GetOutputShape(void) const override { return m_output_shape; } Variables GetParameters(void) override { Variables parameters; parameters.PushBack(m_W); return parameters; } Variables GetGradients(void) override { Variables gradients; gradients.PushBack(m_dW); return gradients; } void SetFrameBufferX(FrameBuffer x) { m_x_buf = x; } FrameBuffer GetFrameBufferX(void) { return m_x_buf; } // ノード単位でのForward計算 std::vector<double> ForwardNode(index_t node, std::vector<double> input_value) const override { BB_ASSERT(input_value.size() == N); auto W_ptr = lock_W_const(); RealType W[(1 << N)]; for ( int i = 0; i < (1 << N); ++i) { W[i] = W_ptr(node, i); W[i] = std::min((RealType)1.0, std::max((RealType)0.0, W[i])); // clip if ( m_lut_binarize ) { W[i] = W[i] > (RealType)0.5 ? (RealType)1.0 : (RealType)0.0; } } RealType x[N][2]; for ( int i = 0; i < N; ++i) { RealType x_tmp = (RealType)input_value[i]; if ( m_binary_mode ) { x_tmp = (RealType)0.5 + ((x_tmp > (RealType)0.5) ? 
+m_unbinarize_bias : -m_unbinarize_bias); // unbinarize } else { x_tmp = std::min((RealType)1.0, std::max((RealType)0.0, x_tmp)); // clip } x[i][0] = (RealType)1.0 - x_tmp; x[i][1] = x_tmp; } RealType y = (RealType)0; for (int i = 0; i < NN; ++i) { RealType w = W[i]; for (int j = 0; j < N; ++j) { w *= x[j][(i >> j) & 1]; } y += w; } // clip y = std::max((RealType)0.0, y); y = std::min((RealType)1.0, y); std::vector<double> result; result.push_back((double)y); return result; } FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override { BB_ASSERT(x_buf.GetType() == DataType<BinType>::type); // SetInputShpaeされていなければ初回に設定 if (x_buf.GetShape() != m_input_shape) { SetInputShape(x_buf.GetShape()); } // 出力を設定 FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<RealType>::type); // backwardの為に保存 if ( train ) { m_x_buf = x_buf; } // パラメータクリップ m_W->Clamp((RealType)0.0, (RealType)1.0); #ifdef BB_WITH_CUDA // LUT6 FP32 CUDA if ( DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) { auto x_ptr = x_buf.LockDeviceMemoryConst(); auto y_ptr = y_buf.LockDeviceMemory(true); auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable(); auto W_ptr = m_W->LockDeviceMemoryConst(); bbcu_fp32_StochasticLut_Forward<N>( (float const *)x_ptr.GetAddr(), (float *)y_ptr.GetAddr(), (int const *)input_table_ptr.GetAddr(), (float const *)W_ptr.GetAddr(), (int )y_buf.GetNodeSize(), (int )y_buf.GetFrameSize(), (int )(y_buf.GetFrameStride() / sizeof(float)), (int )(m_binary_mode ? 1 : 0), (int )(m_lut_binarize ? 
1 : 0), (float )m_unbinarize_bias ); return y_buf; } // LUT6 Bit CUDA if ( DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) { auto x_ptr = x_buf.LockDeviceMemoryConst(); auto y_ptr = y_buf.LockDeviceMemory(true); auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable(); auto W_ptr = m_W->LockDeviceMemoryConst(); bbcu_bit_fp32_StochasticLut_Forward<N>( (int const *)x_ptr.GetAddr(), (float *)y_ptr.GetAddr(), (int const *)input_table_ptr.GetAddr(), (float const *)W_ptr.GetAddr(), (int )y_buf.GetNodeSize(), (int )y_buf.GetFrameSize(), (int )(y_buf.GetFrameStride() / sizeof(float)), (int )(x_buf.GetFrameStride() / sizeof(int)), (int )(m_lut_binarize ? 1 : 0), (float )m_unbinarize_bias ); return y_buf; } #endif // LUT6 SIMD if ( DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && m_host_simd && y_buf.GetFrameSize() % 8 == 0 ) { auto input_table_ptr = m_connection_table.LockConst_InputTable(); simd_fp32_StochasticLut6_Forward(x_buf, y_buf, input_table_ptr.GetAddr(), m_W, m_binary_mode, m_lut_binarize, m_unbinarize_bias); return y_buf; } { // Generic auto node_size = y_buf.GetNodeSize(); auto frame_size = y_buf.GetFrameSize(); auto x_ptr = x_buf.LockConst<BinType>(); auto y_ptr = y_buf.Lock<RealType>(); auto input_table_ptr = m_connection_table.LockConst_InputTable(); auto W_ptr = lock_W_const(); #pragma omp parallel for for ( index_t node = 0; node < node_size; ++node ) { // read W RealType W[(1 << N)]; for ( int i = 0; i < (1 << N); ++i) { W[i] = W_ptr(node, i); if ( m_lut_binarize ) { W[i] = W[i] > (RealType)0.5 ? 
(RealType)1.0 : (RealType)0.0; // binarize } } for ( index_t frame = 0; frame < frame_size; ++frame ) { // read x RealType x[N]; for ( int i = 0; i < N; ++i) { RealType x_tmp = (RealType)x_ptr.Get(frame, input_table_ptr(node, i)); if ( m_binary_mode || DataType<BinType>::type == BB_TYPE_BIT ) { x[i] = (RealType)0.5 + (x_tmp > (RealType)0.5 ? +m_unbinarize_bias : -m_unbinarize_bias); // unbinarize } else { x[i] = std::min((RealType)1.0, std::max((RealType)0.0, x_tmp)); // clip } } // calculate RealType y; StochasticOperation_Lut_Forward<RealType>(x, &y, W, N); // clip y = std::max((RealType)0.0, y); y = std::min((RealType)1.0, y); y_ptr.Set(frame, node, y); } } return y_buf; } } FrameBuffer Backward(FrameBuffer dy_buf) override { if (dy_buf.Empty()) { m_dW = 0; return FrameBuffer(); } BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type); FrameBuffer x_buf = m_x_buf; m_x_buf = FrameBuffer(); FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<RealType>::type); #ifdef BB_WITH_CUDA // LUT6 FP32 CUDA if ( DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) { // tmp buffer index_t tmp_frame_size = m_max_tmp_mem_size / (sizeof(float) * this->GetOutputNodeSize()*N); tmp_frame_size = std::max(tmp_frame_size, (index_t)32); tmp_frame_size = ((tmp_frame_size + 31) & ~0x1f); tmp_frame_size = std::min(tmp_frame_size, dy_buf.GetFrameSize()); FrameBuffer tmp_buf(tmp_frame_size, {this->GetOutputNodeSize()*N}, DataType<RealType>::type); auto x_ptr = x_buf.LockDeviceMemoryConst(); auto dy_ptr = dy_buf.LockDeviceMemoryConst(); auto dx_ptr = dx_buf.LockDeviceMemory(true); auto reverse_table_ptr = m_connection_table.LockDeviceMemConst_ReverseTable(); auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable(); auto W_ptr = m_W->LockDeviceMemoryConst(); auto dW_ptr = 
m_dW->LockDeviceMemory(); auto tmp_ptr = tmp_buf.LockDeviceMemory(); bbcu_fp32_StochasticLut_Backward<N>( (float const *)x_ptr.GetAddr(), (float const *)dy_ptr.GetAddr(), (float *)dx_ptr.GetAddr(), (float *)tmp_ptr.GetAddr(), (int const *)input_table_ptr.GetAddr(), (int const *)reverse_table_ptr.GetAddr(), (float const *)W_ptr.GetAddr(), (float *)dW_ptr.GetAddr(), (int )m_connection_table.GetReverseTableStride(), (int )dx_buf.GetNodeSize(), (int )dy_buf.GetNodeSize(), (int )dx_buf.GetFrameSize(), (int )(dx_buf.GetFrameStride() / sizeof(float)), (int )tmp_buf.GetFrameSize(), (int )(tmp_buf.GetFrameStride() / sizeof(float)), (int )(m_binary_mode ? 1 : 0), (int )(m_lut_binarize ? 1 : 0), (float )m_unbinarize_bias ); return dx_buf; } // LUT6 Bit CUDA if ( DataType<BinType>::type == BB_TYPE_BIT && DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only && dy_buf.IsDeviceAvailable() && x_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable()) { // tmp buffer index_t tmp_frame_size = m_max_tmp_mem_size / (sizeof(float) * this->GetOutputNodeSize()*N); tmp_frame_size = std::max(tmp_frame_size, (index_t)32); tmp_frame_size = ((tmp_frame_size + 31) & ~0x1f); tmp_frame_size = std::min(tmp_frame_size, dy_buf.GetFrameSize()); FrameBuffer tmp_buf(tmp_frame_size, {this->GetOutputNodeSize()*N}, DataType<RealType>::type); auto x_ptr = x_buf.LockDeviceMemoryConst(); auto dy_ptr = dy_buf.LockDeviceMemoryConst(); auto dx_ptr = dx_buf.LockDeviceMemory(true); auto reverse_table_ptr = m_connection_table.LockDeviceMemConst_ReverseTable(); auto input_table_ptr = m_connection_table.LockDeviceMemConst_InputTable(); auto W_ptr = m_W->LockDeviceMemoryConst(); auto dW_ptr = m_dW->LockDeviceMemory(); auto tmp_ptr = tmp_buf.LockDeviceMemory(); bbcu_bit_fp32_StochasticLut_Backward<N>( (int const *)x_ptr.GetAddr(), (float const *)dy_ptr.GetAddr(), (float *)dx_ptr.GetAddr(), (float *)tmp_ptr.GetAddr(), (int const *)input_table_ptr.GetAddr(), (int const 
*)reverse_table_ptr.GetAddr(), (float const *)W_ptr.GetAddr(), (float *)dW_ptr.GetAddr(), (int )m_connection_table.GetReverseTableStride(), (int )dx_buf.GetNodeSize(), (int )dy_buf.GetNodeSize(), (int )dx_buf.GetFrameSize(), (int )(dx_buf.GetFrameStride() / sizeof(float)), (int )(x_buf.GetFrameStride() / sizeof(int)), (int )tmp_buf.GetFrameSize(), (int )(tmp_buf.GetFrameStride() / sizeof(float)), (int )(m_lut_binarize ? 1 : 0), (float )m_unbinarize_bias ); return dx_buf; } #endif // LUT6 SIMD if ( N == 6 && DataType<BinType>::type == BB_TYPE_FP32 && DataType<RealType>::type == BB_TYPE_FP32 && m_host_simd && dy_buf.GetFrameSize() % 8 == 0 ) { auto input_table_ptr = m_connection_table.LockConst_InputTable(); simd_fp32_StochasticLut6_Backward(x_buf, dy_buf, dx_buf, input_table_ptr.GetAddr(), m_W, m_dW, m_unbinarize_bias, m_binary_mode, m_lut_binarize); return dx_buf; } { FrameBuffer tmp_buf(dy_buf.GetFrameSize(), {CalcShapeSize(m_output_shape)*N}, DataType<RealType>::type); // generic dx_buf.FillZero(); auto node_size = dy_buf.GetNodeSize(); auto frame_size = dy_buf.GetFrameSize(); auto x_ptr = x_buf.LockConst<BinType>(); auto dy_ptr = dy_buf.LockConst<RealType>(); auto tmp_ptr = tmp_buf.Lock<RealType>(true); auto input_table_ptr = m_connection_table.LockConst_InputTable(); auto W_ptr = lock_W_const(); auto dW_ptr = lock_dW(); #pragma omp parallel for for ( index_t node = 0; node < node_size; ++node ) { // read W RealType W[1 << N]; for ( int i = 0; i < NN; ++i) { W[i] = W_ptr(node, i); if ( m_lut_binarize ) { W[i] = (W[i] > (RealType)0.5) ? (RealType)1.0 : (RealType)0.0; } } // setup dW RealType dW[NN] = {0}; for ( index_t frame = 0; frame < frame_size; ++frame ) { // read x RealType x[N]; for ( int i = 0; i < N; ++i) { RealType x_tmp = (RealType)x_ptr.Get(frame, input_table_ptr(node, i)); if ( m_binary_mode || DataType<BinType>::type == BB_TYPE_BIT ) { x[i] = (RealType)0.5 + (x_tmp > (RealType)0.5 ? 
+m_unbinarize_bias : -m_unbinarize_bias); // unbinarize } else { x[i] = std::min((RealType)1.0, std::max((RealType)0.0, x_tmp)); // clip } } // read dy RealType dy = dy_ptr.Get(frame, node); // calculate RealType dx[N]; StochasticOperation_Lut_Backward<RealType>(x, dx, &dy, W, dW, N); // write dx for (int i = 0; i < N; ++i) { tmp_ptr.Set(frame, node * N + i, dx[i]); } } // write dW for ( int i = 0; i < NN; ++i) { dW_ptr(node, i) += dW[i]; } } // integrate dx auto dx_ptr = dx_buf.Lock<RealType>(); #pragma omp parallel for for ( index_t frame = 0; frame < frame_size; ++frame ) { for ( index_t node = 0; node < node_size; ++node ) { for (int i = 0; i < N; ++i) { RealType dx = tmp_ptr.Get(frame, node * N + i); auto input_node = input_table_ptr(node, i); dx_ptr.Add(frame, input_node, dx); } } } return dx_buf; } } }; }
aux_loads.h
#include <dirent.h> #include <sys/types.h> #include <cvpp/containers/matrix.h> #include <cvpp/containers/vector.h> #include <cvpp/containers/image.h> #include <cvpp/properties/pose.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> using namespace cvpp; Seq<String> get_files( const String& dir , const int& n = 0 ) { DIR *dp; struct dirent *dirp; Seq<String> files; if( ( dp = opendir( dir.c_str() ) ) == NULL ) disp( "Error Opening" , dir ); while( ( dirp = readdir( dp ) ) != NULL) { String file( dirp->d_name ); if( file[ file.size() - 4 ] == '.' ) files.push_back( dir + file ); } closedir( dp ); std::sort( files.begin() , files.end() ); if( n > 0 ) files.resize( n ); return files; } Matf load_vel2cam( const String& file ) { String line; std::ifstream infile( file + "/calib_velo_to_cam.txt" ); float R[9] , t[3]; while( std::getline( infile , line ) ) { if( line[0] == 'R' && line[1] == ':' ) tokenFloat( line.c_str() , R , ' ' ); if( line[0] == 'T' && line[1] == ':' ) tokenFloat( line.c_str() , t , ' ' ); } Matf T( 4 , 4 ); T.eig() << R[0] , R[1] , R[2] , t[0] , R[3] , R[4] , R[5] , t[1] , R[6] , R[7] , R[8] , t[2] , 0.0 , 0.0 , 0.0 , 1.0 ; return T.t(); } Matf load_imu2vel( const String& file ) { String line; std::ifstream infile( file + "/calib_imu_to_velo.txt" ); float R[9] , t[3]; while( std::getline( infile , line ) ) { if( line[0] == 'R' && line[1] == ':' ) tokenFloat( line.c_str() , R , ' ' ); if( line[0] == 'T' && line[1] == ':' ) tokenFloat( line.c_str() , t , ' ' ); } Matf T( 4 , 4 ); T.eig() << R[0] , R[1] , R[2] , t[0] , R[3] , R[4] , R[5] , t[1] , R[6] , R[7] , R[8] , t[2] , 0.0 , 0.0 , 0.0 , 1.0 ; return T.t(); } void load_cam2cam( const String& dir , Matf& K , Matf& D , Matf& R , Matf& P ) { String file = dir + "/calib_cam_to_cam.txt"; String line; std::ifstream infile( file ); float k[9] , d[5] , r[9] , p[12]; while( std::getline( infile , line ) ) { if( line.substr(0,4).compare( "K_02" ) == 0 ) tokenFloat( line.substr(5).c_str() , k , 
' ' ); if( line.substr(0,4).compare( "D_02" ) == 0 ) tokenFloat( line.substr(5).c_str() , d , ' ' ); if( line.substr(0,9).compare( "R_rect_00" ) == 0 ) tokenFloat( line.substr(10).c_str() , r , ' ' ); if( line.substr(0,9).compare( "P_rect_02" ) == 0 ) tokenFloat( line.substr(10).c_str() , p , ' ' ); } K.reset( 3 , 3 ); forLOOPij( K.r() , K.c() ) K(i,j) = k[ i * K.c() + j ]; D.reset( 5 ); forLOOPi( D.r() ) D(i) = d[ i ]; R.reset( 4 , 4 ).setIdentity(); forLOOPij( 3 , 3 ) R(i,j) = r[ i * 3 + j ]; R.blu(3) = R.blu(3).t(); P.reset( 3 , 4 ); forLOOPij( P.r() , P.c() ) P(i,j) = p[ i * P.c() + j ]; P = P.t(); } SeqMatd load_vel( const Seq<String>& files ) { int n = files.size(); SeqMatd vels( n ); int base = 1000000; float *data = (float*)malloc( base * sizeof(float) ); forLOOPi( n ) { float *px = data + 0 , *py = data + 1; float *pz = data + 2 , *pr = data + 3; FILE *stream; stream = fopen( files[i].c_str() , "rb" ); int num = fread( data , sizeof(float) , base , stream ) / 4; vels[i].reset( num , 4 ); forLOOPj( num ) { vels[i].row(j) << *px , *py , *pz , *pr ; px += 4 ; py += 4 ; pz += 4 ; pr += 4 ; } fclose( stream ); } return vels; } SeqImg3c load_img( const Seq<String>& files ) { int n = files.size(); SeqImg3c imgs( n ); #pragma omp parallel for forLOOPi( n ) { imgs[i].load( files[i] ); } return imgs; } SeqPosef load_pos( const Seq<String>& files ) { int n = files.size(); Matf data( n , 30 ); forLOOPi( n ) { float vals[30]; std::ifstream infile( files[i] ); String line; while( std::getline( infile , line ) ) tokenFloat( ( ' ' + line ).c_str() , vals , ' ' ); forLOOPj( data.c() ) data(i,j) = vals[j]; } float lat0 = data(0,0); float r = 6378137 , s = std::cos( lat0 * PI / 180.0 ); float sr = s * r; Matf xyz( n , 6 ); forLOOPi( xyz.r() ) { float lat = data(i,0) , lon = data(i,1); float z = data(i,2) , r = data(i,3) , p = data(i,4) , w = data(i,5); float x = sr * PI * lon / 180.0; float y = sr * std::log( std::tan( PI * ( 90.0 + lat ) / 360.0 ) ); xyz.row(i) << x , y , z 
, r , p , w; } Matf off = xyz.cl(3).r(0).clone(); xyz.cl(3) -= off; SeqPosef poses( n ); forLOOPi( poses.size() ) poses[i].setPose( xyz.r(i) ); return poses; } void prep_dirs( const String& dir ) { String path; struct stat st = {0}; path = dir + "/proc2"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc2/imgs"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc2/disp1"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc2/disp2"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc2/xyz"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc2/all"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc2/uvxyz"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); } void save_data( const String& suf , const String& dir , const String& path , const Img3c& img , const Img1c& dsp1 , const Img1c& dsp2 , const Matf& xyz , const Matf& uvxyz , const Matf& all ) { int nn = path.length() , n = 0; while( path[ nn - n ] != '/' ) n++; n--; String name = path.substr( nn - n , n -4 ); String sufname = suf + name; disp( sufname ); img.saveIMG( dir + "/proc2/imgs/" + sufname + ".png" ); dsp1.saveIMG( dir + "/proc2/disp1/" + sufname + ".png" ); dsp2.saveIMG( dir + "/proc2/disp2/" + sufname + ".png" ); xyz.saveBIN( dir + "/proc2/xyz/" + sufname + ".bin" ); all.saveBIN( dir + "/proc2/all/" + sufname + ".bin" ); uvxyz.saveBIN( dir + "/proc2/uvxyz/" + sufname + ".bin" ); }
pr34692.c
/* PR preprocessor/34692 */ /* { dg-do compile } */ /* { dg-options "-fopenmp -fdump-tree-gimple" } */ /* { dg-final { scan-tree-dump-times "#pragma omp parallel" 1 "gimple" } } */ /* { dg-final { scan-tree-dump-times "#pragma omp for private" 1 "gimple" } } */ void foo (void) { int i; #define FOO(y, x) y #x #define BAR(x) x #define BAZ(x) x FOO (for (i = 0; i < 10; i++) { const char *vara =, a #define P parallel #pragma omp P #undef P #define P for b #pragma omp P #undef P #define parallel atomic cde f g h); } }
boundary_conditions_and_contact_utilities.h
/* ============================================================================== Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
==============================================================================
*/

//
//   Project Name:        Kratos
//   Last Modified by:    $Author: Nelson Lafontaine $
//   Date:                $Date: 2007-03-06 10:30:34 $
//   Revision:            $Revision: 1.2 $
//
//

#if !defined(KRATOS_BOUNDARY_CONDITIONS_AND_CONTACT_UTILITIES_INCLUDED )
#define KRATOS_BOUNDARY_CONDITIONS_AND_CONTACT_UTILITIES_INCLUDED

// System includes
#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <algorithm>
#include <set>
#include <time.h>

#ifdef _OPENMP
#include <omp.h>
#endif

// External includes

// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "includes/mesh.h"
#include "geometries/geometry.h"
#include "geometries/point_2d.h"
#include "geometries/point_3d.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/tetrahedra_3d_4.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/bounding_box.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
#include "spatial_containers/bins_static_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "utilities/geometry_utilities.h"
#include "utilities/timer.h"
// #include "utilities/timer_CLabra.h"
#include "custom_conditions/slave_contact_point_2d.h"
#include "custom_conditions/slave_contact_point_3d.h"
#include "custom_conditions/master_contact_point_2d.h"
#include "custom_conditions/master_contact_face_2d.h"
#include "custom_conditions/master_contact_face_3D.h"
#include "custom_conditions/point_segment_contact_link.h"
#include "custom_conditions/point_point_contact_link.h"
#include "custom_conditions/contact_link_3D_explicit.h"
#include "geometries/plane.h"
#include "custom_utilities/segment_2d.h"
#include "custom_utilities/intersect_triangles_cases.h"
#include "processes/find_nodal_neighbours_process.h"
#include "processes/find_elements_neighbours_process.h"
#include "processes/find_conditions_neighbours_process.h"
#include "structural_application.h"

namespace Kratos
{

/// Detects the boundary contour of a 2D/3D finite-element mesh, builds master
/// contact surfaces, searches contact pairs with spatial bins and computes
/// penalty-based normal and damping contact forces between boundary elements.
class BoundaryConditionsAndContactUtilities
{
public:

    // Numerical tolerances used throughout the geometric predicates.
    // NOTE(review): #define inside a class leaks into every including TU;
    // static const double members would be safer.
    #define EPSILON 1.0e-10
    #define NEPSILON -1.0e-10
    #define BEPSILON 1.0e+15

    // Intersection classification codes.
    static const int IT_POINT   = 0;
    static const int IT_SEGMENT = 1;
    static const int IT_EMPTY   = 2;

    enum Exist_Node {no_nodes = 0, yes_nodes};
    enum Near_Node  {no_near  = 0, yes_near};
    enum Object     {is_node  = 0, is_object};

    KRATOS_CLASS_POINTER_DEFINITION(BoundaryConditionsAndContactUtilities);

    /// Utilities
    typedef IntersectionSegment2DToSegment2D IntersectionSegments;

    /// Elements
    typedef ModelPart::ElementsContainerType ElementsArrayType;
    /*
    typedef ModelPart::ElementsContainerType::ContainerType ContainerType;
    typedef ContainerType::value_type PointerType;
    typedef ContainerType::iterator IteratorType;
    typedef std::vector<PointerType>::iterator PointerTypeIterator;
    typedef ContactPair<PointerType> ContactPairType;
    typedef std::vector<ContactPairType> ContainerContactPair;
    typedef ContainerContactPair::iterator IteratorContainerContactPair;
    typedef ContainerContactPair::value_type PointerContainerContactPair;
    */

    /// Conditions: general container/iterator aliases.
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
    typedef ConditionsContainerType::iterator ConditionsIteratorType;
    typedef ConditionsContainerType::value_type ConditionsPointerType;
    typedef ContactPair<ConditionsPointerType> ConditionsContactPairType;
    typedef std::vector<ConditionsPointerType>::iterator ConditionsPointerTypeIterator;
    typedef std::vector<ConditionsContactPairType> ConditionsContainerContactPair;
    typedef ConditionsContainerContactPair::iterator ConditionsIteratorContainerContactPair;
    typedef ConditionsContainerContactPair::value_type ConditionsPointerContainerContactPair;

    /// Specific condition types (was: "Condition Especificas").
    typedef SlaveContactPoint2D SlaveContactPointType;
    typedef MasterContactPoint2D MasterContactPointType;
    typedef MasterContactFace2D MasterContactFaceType;

    /// Nodes and properties
    typedef Node<3> NodeType;
    typedef Node<3>::Pointer NodePointerType;
    typedef Geometry<NodeType> GeometryType;
    typedef GeometryType::PointsArrayType PointsArrayType;
    typedef ModelPart::NodesContainerType NodesArrayType;
    typedef Element::GeometryType GeomType;
    typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
    typedef NodesContainerType::iterator NodesIteratorType;
    typedef Properties PropertiesType;

    // Spatial-search configuration is hard-wired to 2D bins; the 3D path of
    // ComputeContactForce is guarded against this at runtime.
    static const std::size_t space_dim = 2;
    typedef SpatialContainersConfigure<space_dim> Configure;
    typedef Configure::PointType PointType;
    typedef PointType::CoordinatesArrayType CoordinatesArrayType;
    typedef Configure::ContainerType ContainerType;
    typedef Configure::PointerType PointerType;
    typedef Configure::IteratorType IteratorType;
    typedef Configure::ResultContainerType ResultContainerType;
    typedef Configure::ResultPointerType ResultPointerType;
    typedef Configure::ResultIteratorType ResultIteratorType;
    typedef Configure::ContactPairType ContactPairType;
    typedef Configure::ContainerContactType ContainerContactType;
    typedef Configure::IteratorContactType IteratorContactType;
    typedef Configure::PointerContactType PointerContactType;
    typedef Configure::PointerTypeIterator PointerTypeIterator;
    typedef ContainerContactType ContainerContactPair;
    typedef IteratorContactType IteratorContainerContactPair;
    typedef PointerContactType PointerContainerContactPair;

    /// Default constructor: members left unset; only useful for containers.
    BoundaryConditionsAndContactUtilities() {}

    /// @param model_part     mesh whose boundary contour and contacts are managed
    /// @param dimension      spatial dimension (2 or 3)
    /// @param penalty_factor scales YOUNG_MODULUS to obtain the contact penalty stiffness
    BoundaryConditionsAndContactUtilities(ModelPart& model_part, const unsigned int& dimension, const double& penalty_factor)
        : mr_model_part(model_part), mrdimension(dimension)
    {
        mpenalty_factor           = penalty_factor;
        // Force the boundary contour to be (re)computed on the first call.
        mcompute_boundary_contour = true;
    }

    virtual ~BoundaryConditionsAndContactUtilities() {}
    //************************************************************************************
    //************************************************************************************

    // Creates the contact conditions (valid for Lagrange multipliers) and flags
    // the elements that belong to the boundary contour.
    /// Clears per-step data and, on the first call (or after the contour was
    /// invalidated), builds the master contact surfaces for 2D or 3D.
    void CreateBoundaries(const unsigned int& initial_conditions_size)
    {
        KRATOS_TRY
        Clear(initial_conditions_size);
        if(mcompute_boundary_contour)
        {
            std::cout<<"CREATING MASTER SURFACES"<< std::endl;
            if(mrdimension==2)
                CalculateBoundaryContour2D(mMasterConditionsArray);
            else
                CalculateBoundaryContour3D(mMasterConditionsArray);
            // The contour is cached; do not rebuild it every step.
            mcompute_boundary_contour = false;
        }
        return;
        KRATOS_CATCH("")
    }

    //************************************************************************************
    //************************************************************************************

    /// This function uses the potential-contact-force concept: searches candidate
    /// pairs among boundary elements with static bins, then applies the 2D or 3D
    /// penalty force routine to every accepted pair.
    void ComputeContactForce()
    {
        KRATOS_TRY

        // The bins configuration is compiled for space_dim; refuse mismatched runs.
        if(mrdimension!= int(space_dim))
            KRATOS_THROW_ERROR(std::logic_error, "The Dimension of Configure and ModelPart not iquals " , "");

        IteratorType it_begin = mBoundaryElements.begin();
        IteratorType it_end   = mBoundaryElements.end();

        //BinsObjectDynamic<Configure> rBinsObjectDynamic(it_begin, it_end);
        //BinsObjectDynamic<Configure>* rBins = &rBinsObjectDynamic;

        /// Static bins over the boundary elements (was: "Bins estatico").
        BinsObjectStatic<Configure> Bins(it_begin, it_end);
        BinsObjectStatic<Configure>* rBins = &Bins;

        const std::size_t MaxNumberOfResults = 1000;
        std::size_t NumberOfResults = 0;
        ResultIteratorType begin;

        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif

        vector<unsigned int> partition;
        CreatePartition(number_of_threads, mBoundaryElements.size(), partition);
        //ContactPairType it_pair;
        ResultContainerType Result(MaxNumberOfResults);
        std::cout<<" PARTITION COMPUTING CONTACT CONDITIONS = " << number_of_threads << std::endl;

        if(mrdimension==2)
        {
            // Each thread reuses its own result buffer (firstprivate copy).
            #pragma omp parallel for firstprivate(NumberOfResults,Result) private(begin)
            for(int k=0; k<number_of_threads; k++)
            {
                IteratorType it_begin = mBoundaryElements.begin() + partition[k];
                IteratorType it_end   = mBoundaryElements.begin() + partition[k+1];
                for(IteratorType it =it_begin; it!=it_end; ++it)
                {
                    begin = Result.begin();
                    NumberOfResults = rBins->SearchObjects(*it, begin, MaxNumberOfResults);
                    if(NumberOfResults!=0)
                    {
                        for(ResultIteratorType rthis = Result.begin(); rthis!= Result.begin() + NumberOfResults; rthis++)
                        {
                            // Skip self-pairs and already-processed targets;
                            // FiltratePairContacts rejects geometric false positives.
                            if((*rthis)->GetValue(IS_TARGET)== false && (*it)->Id()!=(*rthis)->Id() && FiltratePairContacts(*it, *rthis)==true)
                            {
                                ComputeContactForce2D(*it, *rthis);
                                ComputeContactDampingForces(*it, *rthis);
                            }
                        }
                        // Mark so the symmetric pair is not processed twice.
                        (*it)->GetValue(IS_TARGET) = true;
                    }
                }
            }
        }
        else
        {
            #ifdef _OPENMP
            double start_prod = omp_get_wtime();
            #endif
            #pragma omp parallel for firstprivate(NumberOfResults,Result) private(begin)
            for(int k=0; k<number_of_threads; k++)
            {
                IteratorType it_begin = mBoundaryElements.begin() + partition[k];
                IteratorType it_end   = mBoundaryElements.begin() + partition[k+1];
                for(IteratorType it =it_begin; it!=it_end; ++it)
                {
                    //Result[k].clear();
                    //Result[k].reserve(100);
                    //rBinsObjectDynamic.SearchObjects(*it, Result[k]);
                    begin = Result.begin();
                    NumberOfResults = rBins->SearchObjects(*it, begin, MaxNumberOfResults);
                    //if(Result[k].size()!=0){
                    if(NumberOfResults!=0)
                        for(ResultIteratorType rthis = Result.begin(); rthis!= Result.begin() + NumberOfResults /*rthis!=Result[k].end()*/ ; rthis++)
                        {
                            if((*rthis)->GetValue(IS_TARGET)== false && (*it)->Id()!=(*rthis)->Id() && FiltratePairContacts(*it, *rthis)==true)
                            {
                                ComputeContactForce3D(*it, *rthis);
                            }
                        }
                    // Note: unlike the 2D branch, this flag is set even when the
                    // search returned no results (the if above has no braces).
                    (*it)->GetValue(IS_TARGET) = true;
                }
            }
            #ifdef _OPENMP
            double stop_prod = omp_get_wtime();
            std::cout <<" Time Calculating Forces Contact = " << stop_prod - start_prod << std::endl;
            #endif
        }

        std::cout<<" FINISHING COMPUTE CONTACT CONDITIONS " << std::endl;
        KRATOS_CATCH("")
    }

    /// Adds a viscous damping contact force between a target/contactor element
    /// pair, based on the critical damping of the equivalent two-mass penalty spring.
    void ComputeContactDampingForces(const PointerType& Target, const PointerType& Contactor)
    {
        KRATOS_TRY

        typedef
        Element::GeometryType::Pointer GeometryPointer;

        // Use the larger damping ratio of the two elements.
        double dampT = (Target->GetProperties()[DAMPING_RATIO]);
        double dampC = (Contactor->GetProperties()[DAMPING_RATIO]);
        double damp  = std::max(dampT, dampC);

        const GeometryPointer& GTarget    = (Target)->pGetGeometry();
        const GeometryPointer& GContactor = (Contactor)->pGetGeometry();

        // Contact penalty: the softer element governs.
        double pen_vec_tar = mpenalty_factor * (Target->GetProperties()[YOUNG_MODULUS]);
        double pen_vec_con = mpenalty_factor * (Contactor->GetProperties()[YOUNG_MODULUS]);
        double pen = std::min(pen_vec_tar, pen_vec_con);

        // Lumped masses and summed nodal velocities of each element.
        double massC = 0.00;
        double massT = 0.00;
        array_1d<double ,3> vel_T = ZeroVector(3);
        array_1d<double ,3> vel_C = ZeroVector(3);
        array_1d<double ,3> vel   = ZeroVector(3);
        for(unsigned int i = 0; i<(*GTarget).size(); i++)
        {
            massT+= (*GTarget)(i)->FastGetSolutionStepValue(NODAL_MASS);
            noalias(vel_T) += (*GTarget)(i)->FastGetSolutionStepValue(VELOCITY);
        }
        for(unsigned int i = 0; i<(*GContactor).size(); i++)
        {
            massC+= (*GContactor)(i)->FastGetSolutionStepValue(NODAL_MASS);
            noalias(vel_C) += (*GContactor)(i)->FastGetSolutionStepValue(VELOCITY);
        }

        // Critical damping coefficient of the reduced two-mass/penalty-spring system.
        double fact = (massT * massC * pen)/(massT + massC);
        double Ccr  = 2.00 * std::sqrt(fact);

        // Contact direction approximated by the centre-to-centre vector.
        array_1d<double ,3> normal_T = ZeroVector(3);
        array_1d<double ,3> normal_C = ZeroVector(3);
        array_1d<double ,3> Center_T = (GTarget)->Center();
        array_1d<double ,3> Center_C = (GContactor)->Center();
        noalias(normal_T) = Center_C - Center_T;
        noalias(normal_C) = Center_T - Center_C;
        // NOTE(review): only normal_T is normalized; normal_C keeps the raw
        // centre-to-centre length, so the contactor-side force below is scaled
        // by that distance — confirm whether this is intentional.
        noalias(normal_T) = (1.00/norm_2(normal_T)) * normal_T;

        // Relative normal velocity and resulting damping force magnitude.
        noalias(vel) = vel_T - vel_C;
        double vrn = inner_prod(vel, normal_T);
        double fnd = std::fabs(damp * Ccr * vrn);

        // Distribute one third of the damping force to each of the 3 nodes.
        // NOTE(review): RHS is overwritten (noalias =) rather than accumulated
        // (+=), and no node locks are taken although callers run this inside an
        // OpenMP parallel region — verify both points.
        for(unsigned int i = 0; i<(*GTarget).size(); i++)
        {
            array_1d<double, 3>& rhs = (*GTarget)(i)->FastGetSolutionStepValue(RHS);
            noalias(rhs) = 0.33333333333333 * fnd * normal_T;
        }
        for(unsigned int i = 0; i<(*GContactor).size(); i++)
        {
            array_1d<double, 3>& rhs = (*GContactor)(i)->FastGetSolutionStepValue(RHS);
            noalias(rhs) = 0.33333333333333 * fnd * normal_C;
        }

        KRATOS_CATCH("")
    }

    /* Triangle to Triangle */
    // Compute the normal contact force.
    /// Penalty (potential) contact force between two overlapping 2D triangles:
    /// works in each triangle's area coordinates, integrates the overlap along
    /// every edge and distributes the resulting force to the nodes.
    void ComputeContactForce2D(const PointerType& Target, const PointerType& Contactor)
    {
        KRATOS_TRY
        const double R0   = 0.00;
        const double R1   = 1.00;
        const double R2   = 2.00;
        const double RP5  = 0.50;
        const double RP15 = 1.50;

        // unsigned int icontact = 0;
        unsigned int np = 0;
        double a0,a1,a2,b0,b1,b2,c0,c1,c2,n0,n1,n2,fn,fna,fnb;
        double pen,tmp,dmin2,smin,smax;
        double small = EPSILON;
        double nsmall= -EPSILON;
        double big   = BEPSILON;
        array_1d<double,10> p;      // penetration values at the sample points
        array_1d<double,10> s;      // parametric positions of the sample points
        array_1d<double,3> fx;      // accumulated nodal force, x component
        array_1d<double,3> fy;      // accumulated nodal force, y component
        array_1d<double,2> vol;     // 2x signed area of each triangle
        array_1d<array_1d<double,3>,2> rx;  // nodal x coordinates
        array_1d<array_1d<double,3>,2> ry;  // nodal y coordinates
        array_1d<array_1d<double,3>,2> nx;  // outward edge normals, x
        array_1d<array_1d<double,3>,2> ny;  // outward edge normals, y
        array_1d<array_1d<array_1d<double,3>,3>,2> d;  // cross area coordinates
        double vol2 = 0.00;

        // Penalty stiffness: softer element governs.
        double pen_vec_tar = mpenalty_factor * (Target->GetProperties()[YOUNG_MODULUS]);
        double pen_vec_con = mpenalty_factor * (Contactor->GetProperties()[YOUNG_MODULUS]);
        pen = std::min(pen_vec_tar, pen_vec_con);   ///penalty term

        //Element::GeometryType& Tgeom = Target->GetGeometry();
        //Element::GeometryType& Cgeom = Contactor->GetGeometry();
        std::vector<Element::GeometryType::Pointer> Geom(2);
        Geom[0] = Contactor->pGetGeometry();
        Geom[1] = Target->pGetGeometry();

        // Gather nodal coordinates of both triangles.
        for(unsigned int i=0; i<2; i++)
        {
            for(unsigned int j=0; j<3; j++)
            {
                rx[i][j] = ((*Geom[i])(j))->X();
                ry[i][j] = ((*Geom[i])(j))->Y();
            }
        }

        for(unsigned int i=0; i<2; i++)
        {
            // Twice the signed area of triangle i.
            vol[i]=(rx[i][1]-rx[i][0])*(ry[i][2]-ry[i][0])- (ry[i][1]-ry[i][0])*(rx[i][2]-rx[i][0]);
            // Outward, non-unit edge normals of each element
            // (was: "normales salientes no unitarias de las aristas de los elementos").
            unsigned int k = 0;
            for(unsigned int j=0; j<3; j++)
            {
                k= j+1;
                if(k>2) k=0;
                nx[i][j]=ry[i][k]-ry[i][j];
                ny[i][j]=rx[i][j]-rx[i][k];
            }
        }

        // Computing the transformation of nodal coordinates to the other
        // triangle's local (area) coordinates.
        for(unsigned int i=0; i<2; i++)
        {
            unsigned int j = i+1;
            if(j>1) j=0;
            for(unsigned int k=0; k<3; k++)
            {
                for(unsigned int l=0; l<3; l++)
                {
                    d[i][k][l]=((rx[j][l]-rx[i][k])*nx[j][l]+ (ry[j][l]-ry[i][k])*ny[j][l])/vol[j];
                }
            }
        }
        dmin2=big;

        /* main loop: treat each triangle once as penetrator, once as obstacle */
        for(unsigned int it=0; it<2; it++)
        {
            unsigned int jt=it+1;
            if(jt>1)jt=0;
            noalias(fx) = ZeroVector(3);
            noalias(fy) = ZeroVector(3);
            // Squared edge-normal lengths of the other triangle, normalised by area.
            vol2 = vol[jt]*vol[jt];
            n0 = (nx[jt][0]*nx[jt][0]+ny[jt][0]*ny[jt][0])/(vol2);
            n1 = (nx[jt][1]*nx[jt][1]+ny[jt][1]*ny[jt][1])/(vol2);
            n2 = (nx[jt][2]*nx[jt][2]+ny[jt][2]*ny[jt][2])/(vol2);
            for(unsigned int in=0; in<3; in++)
            {
                unsigned int jn=in+1;
                if(jn>2)jn=0;
                // a*, b*: area coordinates of edge endpoints in the other triangle;
                // c*: area coordinates of the other triangle's nodes w.r.t. this edge.
                a0=d[it][in][0];
                a1=d[it][in][1];
                a2=d[it][in][2];
                b0=d[it][jn][0];
                b1=d[it][jn][1];
                b2=d[it][jn][2];
                c0=d[jt][0][in];
                c1=d[jt][1][in];
                c2=d[jt][2][in];
                /* check if contact */
                if((((c0>nsmall)&&(c1>nsmall)&&(c2>nsmall))|| ((c0<small)&&(c1<small)&&(c2<small)))|| (((a0<small)&&(b0<small))||((a1<small)&&(b1<small))|| ((a2<small)&&(b2<small))))
                {
                    // No overlap along this edge: only track the minimum squared distance.
                    if((a0<=a1)&&(a0<=a2))
                    {
                        dmin2=std::min(dmin2,(a0*a0/n0));
                    }
                    else if((a1<=a0)&&(a1<=a2))
                    {
                        dmin2=std::min(dmin2,(a1*a1/n1));
                    }
                    else
                    {
                        dmin2=std::min(dmin2,(a2*a2/n2));
                    }
                }
                else
                {
                    // icontact=it;
                    /* domain of contact: clip the edge parameter to [smin, smax] */
                    smin=R0;
                    smax=R1;
                    if((a0<R0)&&(b0>small))smin=std::max(smin,(a0/(a0-b0)));
                    if((a1<R0)&&(b1>small))smin=std::max(smin,(a1/(a1-b1)));
                    if((a2<R0)&&(b2>small))smin=std::max(smin,(a2/(a2-b2)));
                    if((a0>small)&&(b0<R0))smax=std::min(smax,(a0/(a0-b0)));
                    if((a1>small)&&(b1<R0))smax=std::min(smax,(a1/(a1-b1)));
                    if((a2>small)&&(b2<R0))smax=std::min(smax,(a2/(a2-b2)));
                    if(smax>smin)
                    {
                        // Penetration at the lower clip point.
                        s[0]=smin;
                        p[0]=std::min((a0+smin*(b0-a0)),(a1+smin*(b1-a1)));
                        p[0]=std::min(p[0],(a2+smin*(b2-a2)));
                        np=1;
                        /* intermediate points: parameter values where the active
                           (minimum) area coordinate switches */
                        tmp=b0-a0+a1-b1;
                        if((std::fabs(tmp))>small)
                        {
                            tmp=(a1-a0)/tmp;
                            if((tmp>smin)&&(tmp<smax)&& ((a0+tmp*(b0-a0))<(a2+tmp*(b2-a2))))
                            {
                                s[np]=tmp;
                                p[np]=a0+tmp*(b0-a0);
                                np=np+1;
                            }
                        }
                        tmp=b0-a0+a2-b2;
                        if((std::fabs(tmp))>small)
                        {
                            tmp=(a2-a0)/tmp;
                            if((tmp>smin)&&(tmp<smax)&& ((a0+tmp*(b0-a0))<(a1+tmp*(b1-a1))))
                            {
                                s[np]=tmp;
                                p[np]=a0+tmp*(b0-a0);
                                np=np+1;
                            }
                        }
                        tmp=b1-a1+a2-b2;
                        if((std::fabs(tmp))>small)
                        {
                            tmp=(a2-a1)/tmp;
                            if((tmp>smin)&&(tmp<smax)&& ((a1+tmp*(b1-a1))<(a0+tmp*(b0-a0))))
                            {
                                s[np]=tmp;
                                p[np]=a1+tmp*(b1-a1);
                                np=np+1;
                            }
                        }
                        // Penetration at the upper clip point.
                        s[np]=smax;
                        p[np]=std::min((a0+smax*(b0-a0)),(a1+smax*(b1-a1)));
                        p[np]=std::min(p[np],(a2+smax*(b2-a2)));
                        np=np+1;
                        /* order intermediate points (simple exchange sort, np <= 5) */
                        for(unsigned ip=0; ip<(np-1); ip++)
                        {
                            for(unsigned int jp=(ip+1); jp<np; jp++)
                            {
                                if(s[ip]>s[jp])
                                {
                                    tmp=s[jp];
                                    s[jp]=s[ip];
                                    s[ip]=tmp;
                                    tmp=p[jp];
                                    p[jp]=p[ip];
                                    p[ip]=tmp;
                                }
                            }
                        }
                        /* integrate normal force (trapezoidal over the ordered samples) */
                        fn=p[0]*(s[1]-s[0])+p[np-1]*(s[np-1]-s[np-2]);
                        fnb=p[0]*(s[1]-s[0])*(s[1]+R2*s[0])+ p[np-1]*(s[np-1]-s[np-2])*(s[np-2]+R2*s[np-1]);
                        for(unsigned int ip=1; ip<(np-1); ip++)
                        {
                            fn=fn+p[ip]*(s[ip+1]-s[ip-1]);
                            fnb=fnb+p[ip]*( (s[ip]-s[ip-1])*(s[ip-1]+R2*s[ip])+ (s[ip+1]-s[ip])*(s[ip+1]+R2*s[ip]));
                        }
                        fnb=fnb*pen*RP5;
                        fn=fn*pen*RP15;
                        fna=fn-fnb;
                        /* update total force: split between the two edge endpoints */
                        fx[in]=fx[in]-fna*nx[it][in];
                        fy[in]=fy[in]-fna*ny[it][in];
                        fx[jn]=fx[jn]-fnb*nx[it][in];
                        fy[jn]=fy[jn]-fnb*ny[it][in];
                    }
                }
            }
            //if(icontact==it)
            /* update nodal forces: half the force to each element of the pair */
            {
                Element::GeometryType& this_geom_1 = (*Geom[it]);
                Element::GeometryType& this_geom_2 = (*Geom[jt]);
                for(unsigned int in=0; in<3; in++)
                {
                    array_1d<double,3>& node_rhs_1 = this_geom_1(in)->FastGetSolutionStepValue(RHS);
                    array_1d<double,3>& normal_1 = this_geom_1(in)->FastGetSolutionStepValue(NORMAL);
                    // Node-level lock: callers run this inside an OpenMP region.
                    this_geom_1[in].SetLock();
                    node_rhs_1[0] += 0.50 * fx[in];
                    node_rhs_1[1] += 0.50 * fy[in];
                    node_rhs_1[2] = 0.00;
                    normal_1[0] += 0.50 * fx[in];
                    normal_1[1] += 0.50 * fy[in];
                    normal_1[2] = 0.00;
                    this_geom_1[in].UnSetLock();
                    unsigned int ie=in+1;
                    if(ie>2)ie=0;
                    for(unsigned int jn=0; jn<3; jn++)
                    {
                        // NOTE(review): the references below are fetched with index
                        // (in) while the lock and the force terms use (jn) — this
                        // looks like an index mismatch; confirm whether both
                        // accessors should use (jn).
                        array_1d<double,3>& node_rhs_2 = this_geom_2(in)->FastGetSolutionStepValue(RHS);
                        array_1d<double,3>& normal_2 = this_geom_2(in)->FastGetSolutionStepValue(NORMAL);
                        this_geom_2[jn].SetLock();
                        node_rhs_2[0] -= 0.50 * fx[jn]*d[it][jn][ie];
                        node_rhs_2[1] -= 0.50 * fy[jn]*d[it][jn][ie];
                        node_rhs_2[2] = 0.00;
                        normal_2[0] -= 0.50 * fx[jn]*d[it][jn][ie];
                        normal_2[1] -= 0.50 * fy[jn]*d[it][jn][ie];
                        normal_2[2] = 0.00;
                        this_geom_2[jn].UnSetLock();
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }

    /* tetrahedra to tetrahedra */
    /// Penalty contact force between two overlapping tetrahedra: for every
    /// target-face/contactor-face pair, intersects the target face with the
    /// contactor face in the contactor-face plane, integrates the penetration
    /// over the intersection polygon and distributes the force to the nodes.
    void ComputeContactForce3D(const PointerType& Target, const PointerType& Contactor)
    {
        const double R0   = 0.00;
        const double R1   = 1.00;
        const double R2   = 2.00;
        const double R5   = 5.00;
        const double RP1  = 0.10;
        const double RP25 = 0.25;
        const double RP5  = 0.50;

        double tmp,theigh,penetr,peneto,penetu,penetv,penalty;
        double force,forco,uforc,vforc,factor,fact0,facti,fact1;
        double xorig,yorig,zorig,xe[2],ye[2],ze[2],dct[4];
        double dsc[6][3],dcs[3][6],us[6],vs[6],ub[10],vb[10],anb[10],penetb[10];
        double xt[4],yt[4],zt[4],ut[4],vt[4],ft[4],xcent,ycent,zcent,xnt,ynt,znt;
        double xc[4],yc[4],zc[4],uc[4],vc[4],fc[4],xcenc,ycenc,zcenc,xnc,ync,znc;
        double /*zone2,dmin2,*/factor1;
        // long /*kprop,icontact,ielem,jelem,icoup,jcoup,*/fnonzero;
        long i,j,k,inext,jnext,itars,icons;
        long nspoin,ninerc,niners,nbpoin,innerc[3],inners[6];
        //long itarth,iconth;
        // long iptn[4],ipcn[4];
        // long iptn1[4],ipcn1[4],m;
        NodePointerType ipt[4], ipc[4];

        // NOTE(review): penalty factor is hard-coded to 50.0 here, unlike the 2D
        // routine which uses the configurable mpenalty_factor — confirm.
        double pen_vec_tar = 50.00 * (Target->GetProperties()[YOUNG_MODULUS]);
        double pen_vec_con = 50.00 * (Contactor->GetProperties()[YOUNG_MODULUS]);
        Target->GetValue(IS_TARGET) = true;
        penalty = std::min(pen_vec_tar, pen_vec_con); ///penalty term

        Element::GeometryType& Tgeom = Target->GetGeometry();
        Element::GeometryType& Cgeom = Contactor->GetGeometry();
        //std::vector<Element::GeometryType::Pointer> Geom(2);
        //Geom[0] = Target->pGetGeometry();
        //Geom[1] = Contactor->pGetGeometry();

        /* set centres of contactor and target object (average of 4 nodes) */
        xcent=R0;
        ycent=R0;
        zcent=R0;
        xcenc=R0;
        ycenc=R0;
        zcenc=R0;
        for(i=0; i<4; i++)
        {
            xcenc=xcenc+RP25*(Cgeom(i)->X());
            ycenc=ycenc+RP25*(Cgeom(i)->Y());
            zcenc=zcenc+RP25*(Cgeom(i)->Z());
            xcent=xcent+RP25*(Tgeom(i)->X());
            ycent=ycent+RP25*(Tgeom(i)->Y());
            zcent=zcent+RP25*(Tgeom(i)->Z());
        }

        /*********************************************************/
        /* loop over target surfaces                             */
        /*********************************************************/
        // ipt stores the element connectivities for the current face
        // (was: "Guarda las conectividades del elemento").
        for(itars=0; itars<4; itars++)
        {
            // Select the 3 nodes of face 'itars'; ipt[3] is the opposite vertex
            // replaced below by the element centre.
            ipt[0] = Tgeom(itars); //i2elto[itars][itarth];
            // iptn1[0]= itars;
            ipt[1] = Tgeom(1); //i2elto[1][itarth];
            // iptn1[1]= 1;
            ipt[2] = Tgeom(2); //i2elto[2][itarth];
            // iptn1[2]= 2;
            if(itars>0)
            {
                ipt[3] = Tgeom(itars-1); //i2elto[itars-1][itarth];
                // iptn1[3]= itars-1;
            }
            else
            {
                ipt[3] = Tgeom(3); //i2elto[3][itarth];
                // iptn1[3]= 3;
            }
            if((itars==1)||(itars==2))
            {
                ipt[1] = Tgeom(3); //i2elto[3][itarth];
                // iptn1[1]=3;
            }
            if(itars>1)
            {
                ipt[2] = Tgeom(0); //i2elto[0][itarth];
                // iptn1[2]= 0;
            }

            /*****************************************************/
            /* loop over contactor surfaces                      */
            /*****************************************************/
            for(icons=0; icons<4; icons++)
            {
                // Same face-selection scheme for the contactor tetrahedron.
                ipc[0] = Cgeom(icons); //i2elto[icons][iconth];
                // ipcn1[0] = icons;
                ipc[1] = Cgeom(1); //i2elto[1][iconth];
                // ipcn1[1] = 1;
                ipc[2] = Cgeom(2); //i2elto[2][iconth];
                // ipcn1[2] = 2;
                if(icons>0)
                {
                    ipc[3] = Cgeom(icons-1); //i2elto[icons-1][iconth];
                    // ipcn1[3] = icons-1;
                }
                else
                {
                    ipc[3] = Cgeom(3); //i2elto[3][iconth];
                    // ipcn1[3] = 3;
                }
                if((icons==1)||(icons==2))
                {
                    ipc[1] = Cgeom(3); //i2elto[3][iconth];
                    // ipcn1[1]= 3;
                }
                if(icons>1)
                {
                    ipc[2] = Cgeom(0); //i2elto[0][iconth];
                    // ipcn1[2] = 0;
                }
                // for(m=0; m<4; m++)
                // {
                //     iptn[iptn1[m]]=m;
                //     ipcn[ipcn1[m]]=m;
                // }

                /* set nodal coordinates (index 3 = element centre) */
                for(i=0; i<3; i++)
                {
                    xt[i] = ipt[i]->X();
                    yt[i] = ipt[i]->Y();
                    zt[i] = ipt[i]->Z();
                    xc[i] = ipc[i]->X();
                    yc[i] = ipc[i]->Y();
                    zc[i] = ipc[i]->Z();
                }
                xt[3]=xcent;
                yt[3]=ycent;
                zt[3]=zcent;
                xc[3]=xcenc;
                yc[3]=ycenc;
                zc[3]=zcenc;
                // Shift everything so contactor face node 0 becomes the origin.
                xorig=xc[0];
                yorig=yc[0];
                zorig=zc[0];
                for(i=0; i<4; i++)
                {
                    xt[i]=xt[i]-xorig;
                    yt[i]=yt[i]-yorig;
                    zt[i]=zt[i]-zorig;
                    xc[i]=xc[i]-xorig;
                    yc[i]=yc[i]-yorig;
                    zc[i]=zc[i]-zorig;
                }
                /* contactor normal, e-base and target points in e-base */
                V3DCro(xnc,ync,znc,xc[1],yc[1],zc[1],xc[2],yc[2],zc[2]);
                V3DNor(xe[0],xnc,ync,znc);
                xe[0]=xc[1];
                ye[0]=yc[1];
                ze[0]=zc[1];
                V3DNor(xe[1],xe[0],ye[0],ze[0]);
                V3DCro(xe[1],ye[1],ze[1],xnc,ync,znc,xe[0],ye[0],ze[0]);
                // Signed distance of target points to the contactor plane (dct)
                // and their (u,v) coordinates in the in-plane e-base.
                for(i=0; i<4; i++)
                {
                    V3DDot(dct[i],xnc,ync,znc,xt[i],yt[i],zt[i]);
                    V3DDot(ut[i],xt[i],yt[i],zt[i],xe[0],ye[0],ze[0]);
                    V3DDot(vt[i],xt[i],yt[i],zt[i],xe[1],ye[1],ze[1]);
                }
                /* u,v coordinates of S-points (target edges crossing the plane)
                   and C-points (contactor face corners) */
                nspoin=0;
                for(i=0; i<3; i++)
                {
                    for(j=0; j<2; j++)
                    {
                        inext=i+1;
                        if(inext>2)inext=0;
                        if(j==0)inext=3;
                        // Edge i-inext crosses the contactor plane?
                        if(((dct[i]>EPSILON)&&(dct[inext]<NEPSILON))|| ((dct[i]<NEPSILON)&&(dct[inext]>EPSILON))) //Modified by JXiang
                        {
                            factor=std::fabs(dct[i]-dct[inext]);
                            if(factor>EPSILON)
                            {
                                factor=std::fabs(dct[i]/factor);
                                us[nspoin]=factor*ut[inext]+(R1-factor)*ut[i];
                                vs[nspoin]=factor*vt[inext]+(R1-factor)*vt[i];
                                inners[nspoin]=0;
                                nspoin=nspoin+1;
                            }
                        }
                    }
                }
                // A valid section polygon has 3 or 4 S-points.
                if((nspoin<3)||(nspoin>4))continue;
                /* check ordering of S-points: enforce counter-clockwise order */
                if(((us[1]-us[0])*(vs[2]-vs[0])-(vs[1]-vs[0])*(us[2]-us[0]))<R0)
                {
                    i=0;
                    j=nspoin-1;
                    while(i<j)
                    {
                        k=inners[i];
                        inners[i]=inners[j];
                        inners[j]=k;
                        tmp=us[i];
                        us[i]=us[j];
                        us[j]=tmp;
                        tmp=vs[i];
                        vs[i]=vs[j];
                        vs[j]=tmp;
                        i++;
                        j--;
                    }
                }
                // (u,v) of the contactor face corners.
                for(i=0; i<3; i++)
                {
                    V3DDot(uc[i],xc[i],yc[i],zc[i],xe[0],ye[0],ze[0]);
                    V3DDot(vc[i],xc[i],yc[i],zc[i],xe[1],ye[1],ze[1]);
                    innerc[i]=0;
                }
                /* distances of C-points from S edges (and vice versa); count how
                   many points of one polygon lie inside the other */
                niners=0;
                ninerc=0;
                for(i=0; i<nspoin; i++)
                {
                    inext=i+1;
                    if(inext>=nspoin)inext=0;
                    for(j=0; j<3; j++)
                    {
                        jnext=j+1;
                        if(jnext>2)jnext=0;
                        dcs[j][i]=(uc[jnext]-uc[j])*(vs[i]-vc[j])-(vc[jnext]-vc[j])*(us[i]-uc[j]);
                        dsc[i][j]=(us[inext]-us[i])*(vc[j]-vs[i])-(vs[inext]-vs[i])*(uc[j]-us[i]);
                        if(dsc[i][j]>=R0)
                        {
                            innerc[j]=innerc[j]+1;
                            if(innerc[j]==nspoin) ninerc=ninerc+1;
                        }
                        if(dcs[j][i]>=R0)
                        {
                            inners[i]=inners[i]+1;
                            if(inners[i]==3) niners = niners+1;
                        }
                    }
                }
                /* B-points: vertices of the overlap region */
                if(ninerc==3) /* triangle inside polygon */
                {
                    nbpoin=3;
                    for(i=0; i<nbpoin; i++)
                    {
                        ub[i]=uc[i];
                        vb[i]=vc[i];
                    }
                }
                else if(niners==nspoin) /* polygon inside triangle */
                {
                    nbpoin=nspoin;
                    for(i=0; i<nbpoin; i++)
                    {
                        ub[i]=us[i];
                        vb[i]=vs[i];
                    }
                }
                else /* general case: intersection of polygon and triangle */
                {
                    nbpoin=0;
                    // S-points inside the triangle.
                    for(i=0; i<nspoin; i++)
                    {
                        if(inners[i]==3)
                        {
                            ub[nbpoin]=us[i];
                            vb[nbpoin]=vs[i];
                            nbpoin++;
                        }
                    }
                    for(i=0; i<3; i++) /* grab inner C-points */
                    {
                        if(innerc[i]==nspoin)
                        {
                            ub[nbpoin]=uc[i];
                            vb[nbpoin]=vc[i];
                            nbpoin++;
                        }
                    }
                    for(i=0; i<nspoin; i++) /* intersection points */
                    {
                        inext=i+1;
                        if(inext>=nspoin)inext=0;
                        for(j=0; j<3; j++)
                        {
                            jnext=j+1;
                            if(jnext>2)jnext=0;
                            // Edges cross when each straddles the other's line.
                            if((((dsc[i][j]>EPSILON)&&(dsc[i][jnext]<NEPSILON))|| ((dsc[i][j]<NEPSILON)&&(dsc[i][jnext]>EPSILON)))&& (((dcs[j][i]>EPSILON)&&(dcs[j][inext]<NEPSILON))|| ((dcs[j][i]<NEPSILON)&&(dcs[j][inext]>EPSILON)))) //modified by JXiang
                            {
                                factor=std::fabs(dsc[i][j]-dsc[i][jnext]);
                                if(factor<EPSILON)
                                {
                                    factor=RP5;
                                }
                                else
                                {
                                    factor=std::fabs(dsc[i][j]/factor);
                                }
                                ub[nbpoin]=(R1-factor)*uc[j]+factor*uc[jnext];
                                vb[nbpoin]=(R1-factor)*vc[j]+factor*vc[jnext];
                                nbpoin++;
                            }
                        }
                    }
                    // Put the point with minimum v first (pivot for angular sort).
                    for(i=1; i<nbpoin; i++)
                    {
                        if(vb[i]<vb[0])
                        {
                            tmp=vb[i];
                            vb[i]=vb[0];
                            vb[0]=tmp;
                            tmp=ub[i];
                            ub[i]=ub[0];
                            ub[0]=tmp;
                        }
                    }
                    // Slope of each point relative to the pivot (epsilon-guarded).
                    for(i=1; i<nbpoin; i++)
                    {
                        tmp=ub[i]-ub[0];
                        if((tmp<R0)&&(tmp>(-EPSILON)))
                        {
                            tmp=tmp-EPSILON;
                        }
                        else if((tmp>=R0)&&(tmp<EPSILON))
                        {
                            tmp=tmp+EPSILON;
                        }
                        anb[i]=(vb[i]-vb[0]+EPSILON)/tmp;
                    }
                    for(i=1; i<nbpoin; i++) /* sort B-points by angle around the pivot */
                    {
                        for(j=i+1; j<nbpoin; j++)
                        {
                            if(((anb[i]>=R0)&&(anb[j]>=R0)&&(anb[j]<anb[i]))|| ((anb[i]<R0)&&((anb[j]>=R0)||(anb[j]<anb[i]))))
                            {
                                tmp=vb[i];
                                vb[i]=vb[j];
                                vb[j]=tmp;
                                tmp=ub[i];
                                ub[i]=ub[j];
                                ub[j]=tmp;
                                tmp=anb[i];
                                anb[i]=anb[j];
                                anb[j]=tmp;
                            }
                        }
                    }
                }
                // Need at least a triangle of overlap to integrate a force.
                if(nbpoin<3)continue;
                /* Target-plane normal and penetration at B-points */
                V3DCro(xnt,ynt,znt,xt[1]-xt[0],yt[1]-yt[0],zt[1]-zt[0],xt[2]-xt[0],yt[2]-yt[0],zt[2]-zt[0]);
                V3DDot(theigh,xt[3]-xt[0],yt[3]-yt[0],zt[3]-zt[0],xnt,ynt,znt);
                /* penetration at origin of the e-base and dp/du dp/dv; */
                V3DDot(peneto,xc[0]-xt[0],yc[0]-yt[0],zc[0]-zt[0],xnt,ynt,znt);
                V3DDot(penetu,xe[0],ye[0],ze[0],xnt,ynt,znt);
                V3DDot(penetv,xe[1],ye[1],ze[1],xnt,ynt,znt);
                peneto=peneto/theigh;
                penetu=penetu/theigh;
                penetv=penetv/theigh;
                // Penetration is linear in (u,v): evaluate at every B-point.
                for(i=0; i<nbpoin; i++)
                {
                    penetb[i]=peneto+ub[i]*penetu+vb[i]*penetv;
                }
                /* force and center of force: fan triangulation from B-point 0 */
                forco=R0;
                uforc=R0;
                vforc=R0;
                for(i=1; i<(nbpoin-1); i++)
                {
                    penetr=penetb[0]+penetb[i]+penetb[i+1];
                    if(penetr>EPSILON)
                    {
                        // Triangle area (cross product) times mean penetration times penalty.
                        force=((ub[i]-ub[0])*(vb[i+1]-vb[0])-(vb[i]-vb[0])*(ub[i+1]-ub[0]))*penetr*penalty;
                        fact0=(RP5*penetb[0]+RP25*(penetb[i]+penetb[i+1]))/penetr;
                        facti=(RP5*penetb[i]+RP25*(penetb[0]+ penetb[i+1]))/penetr;
                        fact1=R1-fact0-facti;
                        if(std::fabs(force+forco)>EPSILON)
                        {
                            // Running force-weighted average of the centre of pressure.
                            uforc=(forco*uforc+force*(fact0*ub[0]+ facti*ub[i]+fact1*ub[i+1]))/(forco+force);
                            vforc=(forco*vforc+force*(fact0*vb[0]+ facti*vb[i]+fact1*vb[i+1]))/(forco+force);
                            forco=forco+force;
                        }
                    }
                }
                /* resultant at C-points: barycentric split of the total force */
                for(i=0; i<4; i++)
                {
                    fc[i]=R0;
                    ft[i]=R0;
                }
                tmp=((uc[1]-uc[0])*(vc[2]-vc[0])- (vc[1]-vc[0])*(uc[2]-uc[0]));
                for(i=0; i<3; i++)
                {
                    j=i+1;
                    if(j>2)j=0;
                    k=j+1;
                    if(k>2)k=0;
                    fc[k]=forco*(((uc[j]-uc[i])*(vforc-vc[i])- (vc[j]-vc[i])*(uforc-uc[i]))/tmp);
                }
                /* resultant at T-points */
                tmp=((ut[1]-ut[0])*(vt[2]-vt[0])-(vt[1]-vt[0])*(ut[2]-ut[0]));
                inext=-1;
                // Degenerate projection of the target face: swap in the centre
                // node (index 3) for the most collapsed vertex.
                if(std::fabs(tmp)<RP1*theigh)
                {
                    inext=0;
                    tmp=std::fabs(ut[1]-ut[0])+std::fabs(vt[1]-vt[0]);
                    for(i=0; i<3; i++)
                    {
                        j=i+1;
                        if(j>2)j=0;
                        if(tmp>(std::fabs(ut[j]-ut[i])+std::fabs(vt[j]-vt[i])))
                        {
                            tmp=std::fabs(ut[j]-ut[i])+std::fabs(vt[j]-vt[i]);
                            inext=i;
                        }
                    }
                    j=inext+1;
                    if(j>2)j=0;
                    if(std::fabs(zt[j])>std::fabs(zt[inext]))inext=j;
                    j=inext+1;
                    if(j>2)j=0;
                    k=j+1;
                    if(k>2)k=0;
                    tmp=(ut[k]-ut[j])*(vt[3]-vt[j])- (vt[k]-vt[j])*(ut[3]-ut[j]);
                }
                for(jnext=0; jnext<3; jnext++)
                {
                    i=jnext;
                    j=i+1;
                    if(j>2)j=0;
                    k=j+1;
                    if(k>2)k=0;
                    if(i==inext)i=3;
                    if(j==inext)j=3;
                    if(k==inext)k=3;
                    ft[k]=forco*(((ut[j]-ut[i])*(vforc-vt[i])- (vt[j]-vt[i])*(uforc-ut[i]))/tmp);
                }
                // Share of the centre node redistributed equally to the 3 face nodes.
                ft[3]=RP25*ft[3];
                for(i=0; i<3; i++)
                {
                    ft[i]=ft[i]+ft[3];
                }
                /* add forces into global vector, along the contactor-face normal */
                factor1=R2/R5;
                // fnonzero=1;
                // NOTE(review): nodal RHS updated without SetLock although callers
                // run inside an OpenMP parallel region — potential data race.
                for(i=0; i<4; i++)
                {
                    array_1d<double,3>& node_rhs_1 = (ipc[i])->FastGetSolutionStepValue(RHS);
                    array_1d<double,3>& node_rhs_2 = (ipt[i])->FastGetSolutionStepValue(RHS);
                    node_rhs_1[0] += fc[i]*xnc*factor1;
                    node_rhs_1[1] += fc[i]*ync*factor1;
                    node_rhs_1[2] += fc[i]*znc*factor1;
                    node_rhs_2[0] -= ft[i]*xnc*factor1;
                    node_rhs_2[1] -= ft[i]*ync*factor1;
                    node_rhs_2[2] -= ft[i]*znc*factor1;
                }
            }
        }
    }

    //************************************************************************************
    //************************************************************************************

    // Function called before the displacement update
    // (was: "Funcion que se llama antes de la acualizacion de los desplazamientos").
    void LocalSearch()
    {
        KRATOS_TRY
        IteratorType it_begin = mBoundaryElements.begin();
        IteratorType it_end   = mBoundaryElements.end();
        BinsObjectDynamic<Configure> rBinsObjectDynamic(it_begin, it_end );
        if(mrdimension==2)
        {
            SearchNearNode2D(rBinsObjectDynamic, it_begin, it_end);
            LocalSearch2D(rBinsObjectDynamic, it_begin, it_end);
        }
        else
            LocalSearch3D(rBinsObjectDynamic, it_begin, it_end);
        KRATOS_CATCH("")
    }

    //************************************************************************************
    //************************************************************************************

    /// Searches candidate contact pairs among the boundary elements with dynamic
    /// bins and (in 2D) filters out the false positives.
    /// @return true when at least one contact pair remains.
    bool SearchContactsPairs()
    {
        KRATOS_TRY
        std::cout<< std::endl;
        std::cout<<" COMPUTING CONTACT CONDITIONS TO MODEL PART " << std::endl;
        IteratorType it_begin = mBoundaryElements.begin();
        IteratorType it_end   = mBoundaryElements.end();
        BinsObjectDynamic<Configure> rBinsObjectDynamic(it_begin, it_end );
        rBinsObjectDynamic.SearchContact(mPairContacts);
        if(mrdimension==2)
        {
            LocalSearch2D(rBinsObjectDynamic, it_begin, it_end);
            FiltratePairContacts2D(mPairContacts);
        }
        else
        {
            // 3D filtering currently disabled.
            //LocalSearch3D(rBinsObjectDynamic, it_begin, it_end);
            //std::cout<< " NUMBER OF CONTACT PAIRS = " <<mPairContacts.size()<<std::endl;
            //FiltratePairContacts3D(mPairContacts);
            std::cout<< " NUMBER OF CONTACT PAIRS = " <<mPairContacts.size()<<std::endl;
        }
        if(mPairContacts.size()!=0)
        {
            std::cout<< " NUMBER OF CONTACT PAIRS = " <<mPairContacts.size()<<std::endl;
            //KRATOS_THROW_ERROR(std::logic_error, "GetValue", "");
            return true;
        }
        std::cout<< " NO CONTACTS PAIRS "<<std::endl;
        return false;
        KRATOS_CATCH("")
    }
//************************************************************************************ //************************************************************************************ void ResetValues() { KRATOS_TRY NodesArrayType& pNodes = mr_model_part.Nodes(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> node_partition; CreatePartition(number_of_threads, pNodes.size(), node_partition); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k]; NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1]; for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i) { i->GetValue(IS_CONTACT_SLAVE) = 0; i->GetValue(IS_CONTACT_MASTER) = 0; i->GetValue(NODAL_VALUES) = 0; i->GetValue(DISTANCE) = DBL_MAX; } } KRATOS_CATCH("") } //************************************************************************************ //************************************************************************************ void Clear(const unsigned int& initial_conditions_size) { KRATOS_TRY NodesArrayType& pNodes = mr_model_part.Nodes(); ElementsArrayType& pElements = mr_model_part.Elements(); ConditionsArrayType& pConditions = mr_model_part.Conditions(); //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> node_partition; CreatePartition(number_of_threads, pNodes.size(), node_partition); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k]; NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1]; for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i) { i->GetValue(IS_CONTACT_SLAVE) = 0; i->GetValue(IS_CONTACT_MASTER) = 0; i->GetValue(NODAL_VALUES) = 0; i->GetValue(DISTANCE) = DBL_MAX; i->GetValue(NEAR_NODE) = 
*(i.base()); } } //No se han producido nuevas condiciones de contorno if(mcompute_boundary_contour==true) { /// Borro las condiciones masters if(pConditions.size()>initial_conditions_size) { ModelPart::ConditionIterator end_previos = pConditions.begin() + initial_conditions_size; ModelPart::ConditionIterator end_actual = pConditions.end(); pConditions.erase(end_previos, end_actual); } FindElementalNeighboursProcess ElementosVecinos(mr_model_part, mrdimension, 10); FindNodalNeighboursProcess NodosVecinos(mr_model_part, mrdimension, 10); FindConditionsNeighboursProcess CondicionesVecinas(mr_model_part, mrdimension, 10); ElementosVecinos.ClearNeighbours(); NodosVecinos.ClearNeighbours(); CondicionesVecinas.ClearNeighbours(); ElementosVecinos.Execute(); NodosVecinos.Execute(); CondicionesVecinas.Execute(); vector<unsigned int> condition_partition; CreatePartition(number_of_threads, pConditions.size(), condition_partition); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { ConditionsArrayType::iterator it_begin=pConditions.ptr_begin()+condition_partition[k]; ConditionsArrayType::iterator it_end=pConditions.ptr_begin()+condition_partition[k+1]; for (ConditionsArrayType::iterator it= it_begin; it!=it_end; ++it) { WeakPointerVector<Element>& rC = it->GetValue(NEIGHBOUR_ELEMENTS); rC.erase(rC.begin(),rC.end() ); } } vector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { ElementsArrayType::iterator it_begin=pElements.ptr_begin()+element_partition[k]; ElementsArrayType::iterator it_end=pElements.ptr_begin()+element_partition[k+1]; for (ElementsArrayType::iterator it= it_begin; it!=it_end; ++it) { WeakPointerVector<Condition> & neighb_conds = it->GetValue(NEIGHBOUR_CONDITIONS); for (WeakPointerVector< Condition >::iterator neighb_cond = neighb_conds.begin(); neighb_cond != neighb_conds.end(); neighb_cond++) 
                (neighb_cond->GetValue(NEIGHBOUR_ELEMENTS)).push_back( *(it.base()));
        }
    }
    // Drop all cached contact-search state for the next step.
    mPairContacts.clear();
    mBoundaryElements.clear();
    mMasterConditionsArray.clear();
    }
    KRATOS_CATCH("")
}

//************************************************************************************
//************************************************************************************
// Dimension dispatcher: creates point-segment (2D) or point-face (3D)
// linking conditions from the local-search results.
void CreateLinkingConditionsBasedOnLocalSearch(const unsigned int& initial_conditions_size)
{
    KRATOS_TRY
    if(mrdimension==2)
        CreateLinkingConditionsBasedOnLocalSearch2D(initial_conditions_size);
    else
        CreateLinkingConditionsBasedOnLocalSearch3D(initial_conditions_size);
    KRATOS_CATCH("")
}

//************************************************************************************
//************************************************************************************
// 2D variant: pairs slave/master boundary elements via the bins, classifies
// each pair (nodes inside / no nodes inside) and emits point-segment contact
// link conditions per thread (function continues on following collapsed lines).
void CreateLinkingConditionsBasedOnLocalSearch2D(const unsigned int& initial_conditions_size)
{
    KRATOS_TRY
    ConditionsArrayType& rConditions = mr_model_part.Conditions();
    IntersectTriangleCases<Configure> IntersectTriangles(mr_model_part);
    array_1d<NodePointerType, 2> Ids;
    std::vector<NodePointerType> InsideNodes;
    std::vector<array_1d<NodePointerType, 2 > > Ids_2;
    std::vector<Near_Node> Is_Near;
    // One condition container per thread; merged into the model part at the end.
    std::vector<ConditionsArrayType> LinkingConditions;
    unsigned int Id = rConditions.size() + 1;
    unsigned int properties_index = mr_model_part.NumberOfProperties();
    PropertiesType::Pointer tempProperties = PropertiesType::Pointer(new PropertiesType(properties_index+1));
    //mr_model_part.AddProperties(tempProperties);
    int Case = 0;
    unsigned int master = 0;
    unsigned int slave = 1;
    //bool is_repited = false;
    //bool corner = false;
    bool Change = true;
    // Near_Node Near = no_near;
    Exist_Node Exist = no_nodes;
    NodePointerType Id_Node_Case_5;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    //std::vector<ResultContainerType> Result(number_of_threads);
    ResultContainerType Result;
    /// creating the bins of objects (declaration completes on the next
    /// collapsed source line)
    BinsObjectDynamic<Configure>
rBinsObjectDynamic(mBoundaryElements.begin(), mBoundaryElements.end()); ///bins de puntos //BinsDynamic<2, NodeType, NodesContainerType> BinsPoint(mBoundaryNodes.begin(), mBoundaryNodes.end()); /// busqueda local de los segmentos mas cercanos a un nodo //SearchNearNode2D(); SearchNearNode2D(rBinsObjectDynamic, mBoundaryElements.begin(), mBoundaryElements.end()); IdentifyMasterSegment2D(); //LocalSearch2D(rBinsObjectDynamic, mBoundaryElements.begin(), mBoundaryElements.end()); LinkingConditions.resize(number_of_threads); vector<unsigned int> partition; CreatePartition(number_of_threads, mBoundaryElements.size(), partition); ContactPairType it_pair; std::cout<<" PARTITION COMPUTING CONTACT CONDITIONS = " << number_of_threads << std::endl; std::cout<<" NUMBER OF INITIAL CONDITIONS = " << initial_conditions_size << std::endl; std::cout<<" NUMBER OF MASTER SURFACES CONDITIONS = " << rConditions.size()-initial_conditions_size << std::endl; // #pragma omp parallel for firstprivate (Case, Id_Node_Case_5, master, slave, /*is_repited, corner,*/ Change, Near, Exist) private (Result, Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near) #pragma omp parallel for firstprivate (Case, Id_Node_Case_5, master, slave, Change, Exist) private (Result, Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near) for(int k=0; k<number_of_threads; k++) { IteratorType it_begin = mBoundaryElements.begin() + partition[k]; IteratorType it_end = mBoundaryElements.begin() + partition[k+1]; for(IteratorType it =it_begin; it!=it_end; it++) { Result.clear(); Result.reserve(100); (*it)->GetValue(IS_TARGET)=true; rBinsObjectDynamic.SearchObjects(*it, Result); if(Result.size()!=0) { for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); ++rthis) { if(FiltratePairContacts(*it, *rthis)==true) { Exist = no_nodes; Case = 0; master = 0; slave = 1; (it_pair)[master] = (*rthis); (it_pair)[slave] = (*it); Ids_2.clear(); InsideNodes.clear(); NodeInside((it_pair)[0], (it_pair)[1], InsideNodes); 
if(InsideNodes.size()==0) { InsideNodes.clear(); ContactPairType it_pair_2; (it_pair_2)[master] = (*it); (it_pair_2)[slave] = (*rthis); NodeInside((it_pair_2)[0], (it_pair_2)[1], InsideNodes); if(InsideNodes.size()==0) { Exist = no_nodes; } else { Exist = yes_nodes; continue; } } else Exist = yes_nodes; switch(Exist) { case(yes_nodes): { Case = IntersectTriangles.LocateCaseItersection(Id_Node_Case_5, Change, InsideNodes, (it_pair)[master], (it_pair)[slave]); switch(Case) { /* case 1: /// un solo nodo dentro { Near = CheckNearNodes(master, slave, InsideNodes[0], (it_pair)[master], Ids); if(CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k])){ Id++; } break; } */ case 1 : case 2: /// dos nodos dentro { for(unsigned int in = 0; in<InsideNodes.size(); in++) { { // Near = CheckNearNodes(master, slave, InsideNodes[in], (it_pair)[master], Ids); if(CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k])) { Id++; } } } break; } case 3: { break; } case 5: { break; } } break; } case(no_nodes): { ComputeContactForce2D(((it_pair)[slave]), ((it_pair)[master])); /* ///Penalty unsigned int size_master = ((it_pair)[master])->GetValue(NEIGHBOUR_CONDITIONS).size(); unsigned int size_slave = ((it_pair)[slave])->GetValue(NEIGHBOUR_CONDITIONS).size(); if(size_master==1 && size_slave==1) { //std::cout<< " MASTER OBJECT = " << (it_pair)[master]->Id() <<" SLAVE OBJECT = " << (it_pair)[slave]->Id() << std::endl; CheckNearNodes(master, slave, (it_pair)[slave], (it_pair)[master], Ids_2, Is_Near); //KRATOS_WATCH(Ids_2.size()) if(CreateLinkingConditions(Id, master, slave, Ids_2[0], it_pair, tempProperties, Exist, LinkingConditions[k])){ Id++; } } else if(size_master>1 || size_slave>1) { ComputeContactForce2D(((it_pair)[slave]), ((it_pair)[master])); } */ break; } } } } } } } unsigned int size = 0; //adding linking to model_part for(int k=0; k<number_of_threads; k++) { 
size+=LinkingConditions[k].size(); for(ConditionsArrayType::ptr_iterator it=LinkingConditions[k].ptr_begin(); it!= LinkingConditions[k].ptr_end(); ++it ) mr_model_part.Conditions().push_back(*it); } std::cout<<" NUMBER OF LINKING CONTACT CONDITIONS = " << size << std::endl; std::cout<<" TOTAL NUMBER CONDITIONS = " << rConditions.size() << std::endl; LinkingConditions.clear(); KRATOS_CATCH("") } //************************************************************************************ //************************************************************************************ void CreateLinkingConditionsBasedOnLocalSearch3D(const unsigned int& initial_conditions_size) { KRATOS_TRY #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif ConditionsArrayType& rConditions = mr_model_part.Conditions(); array_1d<NodePointerType, 2 > Ids; std::vector<NodePointerType> InsideNodes; std::vector<array_1d<unsigned int, 2 > > Ids_2; std::vector<Near_Node> Is_Near; std::vector<ConditionsArrayType> LinkingConditions(number_of_threads); unsigned int Id = rConditions.size() + 1; unsigned int properties_index = mr_model_part.NumberOfProperties(); PropertiesType::Pointer tempProperties = PropertiesType::Pointer(new PropertiesType(properties_index+1) ); //mr_model_part.AddProperties(tempProperties); int Case = 0; // unsigned int Id_Node_Case_5 = 0; unsigned int master = 0; unsigned int slave = 1; // bool is_repited = false; // bool corner = false; // bool Change = true; // Near_Node Near = no_near; Exist_Node Exist = no_nodes; ResultContainerType Result; #ifdef _OPENMP double start_prod = omp_get_wtime(); #endif BinsObjectDynamic<Configure> rBinsObjectDynamic(mBoundaryElements.begin(), mBoundaryElements.end()); #ifdef _OPENMP double stop_prod = omp_get_wtime(); std::cout << " Time creating bins = " << stop_prod - start_prod << " seconds" <<std::endl; #endif LocalSearch3D(rBinsObjectDynamic, mBoundaryElements.begin(), mBoundaryElements.end()); 
vector<unsigned int> partition; CreatePartition(number_of_threads, mBoundaryElements.size(), partition); ContactPairType it_pair; std::cout<<" Number of threads used for contact = " << number_of_threads << std::endl; std::cout<<" Number of initial conditions = " << initial_conditions_size << std::endl; std::cout<<" Number of master surface conditions = " << rConditions.size()-initial_conditions_size << std::endl; #ifdef _OPENMP double start = omp_get_wtime(); #endif #pragma omp parallel for shared(LinkingConditions) private(Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near, Case, master, slave, Exist, Result) // #pragma omp parallel for shared(LinkingConditions) private(Id, it_pair, Ids, InsideNodes, Ids_2, Is_Near, Case, Id_Node_Case_5, master, slave, is_repited, corner, Change, Near, Exist, Result) for(int k=0; k<number_of_threads; k++) { IteratorType it_begin = mBoundaryElements.begin() + partition[k]; IteratorType it_end = mBoundaryElements.begin() + partition[k+1]; for(IteratorType it =it_begin; it!=it_end; it++) { Result.clear(); Result.reserve(100); rBinsObjectDynamic.SearchObjects(*it, Result); if(Result.size()!=0) { for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++) { if(FiltratePairContacts(*it, *rthis)==true) { Exist = no_nodes; Case = 0; master = 0; slave = 1; (it_pair)[master] = (*rthis); (it_pair)[slave] = (*it); Ids_2.clear(); InsideNodes.clear(); NodeInside((it_pair)[0], (it_pair)[1], InsideNodes); if(InsideNodes.size()==0) { InsideNodes.clear(); ContactPairType it_pair_2; (it_pair_2)[master] = (*it); (it_pair_2)[slave] = (*rthis); NodeInside((it_pair_2)[0], (it_pair_2)[1], InsideNodes); if(InsideNodes.size()==0) { Exist = no_nodes; } else { Exist = yes_nodes; continue; } } else Exist = yes_nodes; switch(Exist) { case(yes_nodes): { //std::cout<< " Yes Nodes" << std::endl; //std::cout<< " MASTER OBJECT = " << (it_pair)[master]->Id() <<" SLAVE OBJECT = " << (it_pair)[slave]->Id() << std::endl; Case = InsideNodes.size(); 
switch(Case) { /* case 1: // un solo nodo dentro { (InsideNodes[0])->GetValue(IS_CONTACT_SLAVE) = 1; Near = CheckNearNodes(master, slave, InsideNodes[0], (it_pair)[master], Ids); CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k]); Id++; break; } */ case 1: case 2: case 3: { for(unsigned int in = 0; in<InsideNodes.size(); in++) { if(InsideNodes[in]->GetValue(IS_CONTACT_SLAVE)==0) { InsideNodes[in]->GetValue(IS_CONTACT_SLAVE) = 1; // Near = CheckNearNodes(master, slave, InsideNodes[in], (it_pair)[master], Ids); CreateLinkingConditions(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions[k] ); Id++; } } break; } case 5: { break; } } break; } case(no_nodes): { break; } } Exist = no_nodes; // Near = no_near; // corner = false; // Change = true; // is_repited = false; Case = 0; // Id_Node_Case_5 = 0; master = 0; slave = 1; Ids_2.clear(); Is_Near.clear(); InsideNodes.clear(); } } } } } #ifdef _OPENMP double stop = omp_get_wtime(); std::cout << " Time Creating Linking Conditions = " << stop - start << " seconds" << std::endl; #endif int rId = rConditions.size() + 1; for(int k=0; k<number_of_threads; k++) { for(ConditionsArrayType::ptr_iterator it=LinkingConditions[k].ptr_begin(); it!= LinkingConditions[k].ptr_end(); ++it ) { (*it)->SetId(rId); rId++; } } unsigned int size = 0; for(int k=0; k<number_of_threads; k++) { size+=LinkingConditions[k].size(); for(ConditionsArrayType::ptr_iterator it=LinkingConditions[k].ptr_begin(); it!= LinkingConditions[k].ptr_end(); ++it ) mr_model_part.Conditions().push_back(*it); } std::cout<<" Number of linking conditions = " << size << std::endl; std::cout<<" Total number of conditions = " << rConditions.size() << std::endl; LinkingConditions.clear(); KRATOS_CATCH("") } //***************************************************************************************************** 
//***************************************************************************************************** ///Permite decidir si el nodo que no esta dentro de un elemento es el correcto para ser el slave void VerifyCorrectSlaveNode(unsigned int& master, unsigned int& slave, const array_1d<NodePointerType, 2 >& Ids) { const unsigned int master_aux = master; const unsigned int slave_aux = slave; array_1d<double, 2> Distances; array_1d<double, 2> Points0; array_1d<double, 2> Points1; array_1d<double, 2> Points2; WeakPointerVector<Condition>& neighb_cond_slave = (Ids[slave_aux])->GetValue(NEIGHBOUR_CONDITIONS); WeakPointerVector<Condition>& neighb_cond_master = (Ids[master_aux])->GetValue(NEIGHBOUR_CONDITIONS); Segment2D rSegment; Points0[0] = (Ids[slave_aux])->X(); Points0[1] = (Ids[slave_aux])->Y(); double distance = DBL_MAX; double compare_distance = 0.00; for(WeakPointerVector<Condition>::iterator neighb = neighb_cond_master.begin(); neighb!= neighb_cond_master.end(); neighb++) { Condition::GeometryType& geom_2 = (neighb)->GetGeometry(); Points1[0] = geom_2[0].X(); Points1[1] = geom_2[0].Y(); Points2[0] = geom_2[1].X(); Points2[1] = geom_2[1].Y(); rSegment.AssignPointsAndComputeParameters(Points1, Points2); compare_distance = rSegment.DistPoint2Segment2D(Points0); if(compare_distance<distance) { distance = compare_distance; } } Distances[0] = distance; Points0[0] = (Ids[master_aux])->X(); Points0[1] = (Ids[master_aux])->Y(); distance = DBL_MAX; compare_distance = 0.00; for(WeakPointerVector<Condition>::iterator neighb = neighb_cond_slave.begin(); neighb!= neighb_cond_slave.end(); neighb++) { Condition::GeometryType& geom_2 = (neighb)->GetGeometry(); Points1[0] = geom_2[0].X(); Points1[1] = geom_2[0].Y(); Points2[0] = geom_2[1].X(); Points2[1] = geom_2[1].Y(); rSegment.AssignPointsAndComputeParameters(Points1, Points2); compare_distance = rSegment.DistPoint2Segment2D(Points0); if(compare_distance<distance) { distance = compare_distance; } } Distances[1] = distance; if( 
Distances[1]< Distances[0])
    {
        // The master node sits closer to the slave's faces than vice versa:
        // swap the roles so the true slave carries the contact point.
        master = slave_aux;
        slave = master_aux;
    }
}

//*****************************************************************************************************
//*****************************************************************************************************
// Builds a point-point contact link: a slave contact point and a master
// contact point joined by a 2-node line geometry, appended to LinkingConditions.
void CreatePointLinkingConditions(
    const unsigned int& master,
    const unsigned int& slave,
    const array_1d<NodePointerType, 2 >& Ids,
    const ContactPairType& it_pair,
    const PropertiesType::Pointer& tempProperties,
    const unsigned int& Id,
    ConditionsArrayType& LinkingConditions
)
{
    KRATOS_TRY
    // Slave Node
    Point2D<Node<3> >::Pointer point_geom_slave = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[slave]) );
    Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPointType(Id, point_geom_slave) );
    // Master Node
    Point2D<Node<3> >::Pointer point_geom_master = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[master]));
    Condition::Pointer MasterNode = Condition::Pointer(new MasterContactPointType(Id, point_geom_master) );
    Condition::GeometryType& Mgeom = MasterNode->GetGeometry();
    Condition::GeometryType& Sgeom = SlaveNode ->GetGeometry();
    // Two-node line joining the slave and master points carries the link.
    Line2D2<Node<3> >::Pointer Lgeom = Line2D2<Node<3> >::Pointer( new Line2D2<Node<3> >(Sgeom(0), Mgeom(0) ) );
    Condition::Pointer newLink = Condition::Pointer( new PointPointContactLink(Id,
                                 Lgeom,
                                 tempProperties,
                                 SlaveNode,
                                 MasterNode ) );
    LinkingConditions.push_back(newLink);
    KRATOS_CATCH("")
}

//*****************************************************************************************************
//*****************************************************************************************************
// When two objects intersect but it is not known which node falls inside:
// selects the closest master/slave node combination over all node pairs
// (function continues on the following collapsed source lines).
void CheckNearNodes(
    const unsigned int& master,
    const unsigned int& slave,
    const PointerType& SlaveObject,
    const PointerType& MasterObject,
    std::vector<array_1d<NodePointerType, 2 > >& Ids,
    std::vector<Near_Node>& Is_Near
)
{
    KRATOS_TRY
    std::vector<double> Distance;
std::vector<double> Distance_aux; std::vector<double>::iterator it; std::vector<double>::iterator it_2; array_1d<NodePointerType, 2 > Id; array_1d<double, 3> vector; const Element::GeometryType& geom_0 = MasterObject->GetGeometry(); const Element::GeometryType& geom_1 = SlaveObject->GetGeometry(); double distance = 0.00; array_1d<unsigned int, 9 > M; array_1d<unsigned int, 9 > S; M[0] = 0; M[1] = 0; M[2] = 0; M[3] = 1; M[4] = 1; M[5] = 1; M[6] = 2; M[7] = 2; M[8] = 2; S[0] = 0; S[1] = 1; S[2] = 2; S[3] = 0; S[4] = 1; S[5] = 2; S[6] = 0; S[7] = 1; S[8] = 2; // busco la distancia menor for(unsigned int i = 0; i<geom_0.size(); i++) { for(unsigned int j = 0; j<geom_1.size(); j++) { noalias(vector) = ( geom_0[i]-geom_1[j]) ; distance = norm_2(vector); Distance.push_back(distance); } } const double min = (*std::min_element(Distance.begin(), Distance.end() ) ); it = std::find(Distance.begin(), Distance.end(), min); const int position = int(it-Distance.begin()); Id[master] = geom_0(M[position]); Id[slave] = geom_1(S[position]); Ids.push_back(Id); Is_Near.push_back(no_near); /* //Check si dos corner chocan std::vector<NodePointerType> nodes; const bool test_one = VerifyToCornerIntersect(nodes, SlaveObject, MasterObject); if( test_one==false && nodes.size()!=0) { KRATOS_WATCH("BBBBBBBBBBB") if(nodes.size()==2) { Id[master] = nodes[0]; Id[slave] = nodes[1]; } else { // si no se cumple lo anterior tomamos los nodos mas cercanos // WARNING = Solo valido para un caso en que un solo nodo quede fuera const double min = (*std::min_element(Distance.begin(), Distance.end() ) ); it = std::find(Distance.begin(), Distance.end(), min); const int position = int(it-Distance.begin()); Id[master] = geom_0(M[position]); Id[slave] = geom_1(S[position]); } Ids.push_back(Id); Is_Near.push_back(no_near); } //NO VALIDO PARA ELEMTOS CON MAL RATIO else { Distance_aux.resize(Distance.size()); Distance_aux = Distance; const double min = (*std::min_element(Distance.begin(), Distance.end() ) ); it = 
std::find(Distance.begin(), Distance.end(), min); const int position = int(it-Distance.begin()); Id[master] = geom_0(M[position]); Id[slave] = geom_1(S[position]); Ids.push_back(Id); Is_Near.push_back(no_near); const double min_2 = (*min_element_2(Distance_aux.begin(), Distance_aux.end(), min ) ); it_2 = std::find(Distance.begin(), Distance.end(), min_2); const int position_2 = int(it_2-Distance.begin()); Id[master] = geom_0(M[position_2]); Id[slave] = geom_1(S[position_2]); Ids.push_back(Id); Is_Near.push_back(no_near); } */ KRATOS_CATCH("") } //***************************************************************************************************** //***************************************************************************************************** // Saca elsegundo min de un vector std::vector<double>::iterator min_element_2( const std::vector<double>::iterator first, const std::vector<double>::iterator last, const double& cond) { std::vector<double>::iterator second_lowest = first; std::vector<double>::iterator first_1 = first; std::vector<double>::iterator first_2 = first; const int size = int(last- first); int count = 0; if (first==last) return last; for(first_1=first; first_1!=last; first_1++) { if(*first_1!=cond) for(first_2=first; first_2!=last; first_2++) { if(*first_2>cond && *first_2!=*first_1) { if(*first_1<*first_2) { count++; continue; } else break; } } if(count==size-2) { *second_lowest = *first_1; break; } else count=0; } return second_lowest; } //***************************************************************************************************** //***************************************************************************************************** bool VerifyToCornerIntersect( std::vector<NodePointerType>& Ids, const PointerType& SlaveObject, const PointerType& MasterObject ) { KRATOS_TRY WeakPointerVector<Condition>& neighb_cond_master = MasterObject->GetValue(NEIGHBOUR_CONDITIONS); WeakPointerVector<Condition>& neighb_cond_slave = 
SlaveObject->GetValue(NEIGHBOUR_CONDITIONS); std::vector<std::vector<unsigned int> > segment; segment.resize(neighb_cond_slave.size()); vector<array_1d<double, 2> > Points0; vector<array_1d<double, 2> > Points1; array_1d<double, 2> Point; unsigned int I = 0; unsigned int II = 1; unsigned int III = 1; unsigned int IV = 0; Points0.resize(2, false); Points1.resize(2, false); for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond_slave.end(); ++cond_slave) { Condition::GeometryType& geom = cond_slave->GetGeometry(); Point[0] = 0.00; Point[1] = 0.00; Points0(0)[0] = geom[0].X(); Points0(0)[1] = geom[0].Y(); Points0(1)[0] = geom[1].X(); Points0(1)[1] = geom[1].Y(); I = 0; III = 0; for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin(); cond!= neighb_cond_master.end(); ++cond) { Condition::GeometryType& geom_3 = cond->GetGeometry(); Points1(0)[0] = geom_3[0].X(); Points1(0)[1] = geom_3[0].Y(); Points1(1)[0] = geom_3[1].X(); Points1(1)[1] = geom_3[1].Y(); if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY) segment[IV].push_back(I); I++; III++; if(III>neighb_cond_master.size()) break; } II++; IV++; if(II>neighb_cond_slave.size()) break; } /// dos triangulos que se intersectan pero no tienen nodo dentro. 
/// Sus aristan chocan en dos partes del triangulo master if(segment.size()==3) if(segment[0].size()== 2 && segment[1].size()== 2 && segment[2].size()== 2) return true; if(neighb_cond_master.size()==2 && neighb_cond_slave.size()==2) { Condition::GeometryType& geom_1 = (neighb_cond_master(0).lock())->GetGeometry(); Condition::GeometryType& geom_2 = (neighb_cond_master(1).lock())->GetGeometry(); if(geom_1[0].Id()==geom_2[0].Id()) Ids.push_back(geom_1(0)); else if(geom_1[0].Id()==geom_2[1].Id()) Ids.push_back(geom_1(0)); else if(geom_1[1].Id()==geom_2[0].Id()) Ids.push_back(geom_1(1)); else if(geom_1[1].Id()==geom_2[1].Id()) Ids.push_back(geom_1(1)); else std::cout<< "No node A " << std::endl; Condition::GeometryType& geom_3 = (neighb_cond_slave(0).lock())->GetGeometry(); Condition::GeometryType& geom_4 = (neighb_cond_slave(1).lock())->GetGeometry(); if(geom_3[0].Id()==geom_4[0].Id()) Ids.push_back(geom_3(0)); else if(geom_3[0].Id()==geom_4[1].Id()) Ids.push_back(geom_3(0)); else if(geom_3[1].Id()==geom_4[0].Id()) Ids.push_back(geom_3(1)); else if(geom_3[1].Id()==geom_4[1].Id()) Ids.push_back(geom_3(1)); else std::cout<< "No node B " << std::endl; if(Ids.size()==2) return false; } return true; KRATOS_CATCH("") } //***************************************************************************************************** //***************************************************************************************************** Near_Node CheckNearNodes( const unsigned int& master, const unsigned int& slave, const NodePointerType& SlaveNode, const PointerType& MasterObject, array_1d<NodePointerType, 2 >& Ids ) { //std::vector<double> Distance; array_1d<double, 3> vector; array_1d<double, 3> coordinates = SlaveNode->Coordinates(); const Element::GeometryType& geom_0 = MasterObject->GetGeometry(); double distance = 0.00; double distance2 = 1E10;; // busco la distancia menor for(unsigned int i = 0; i<geom_0.size(); i++) { noalias(vector) = ( geom_0(i)->Coordinates() - coordinates); 
distance = norm_2(vector); //Distance.push_back(distance); if(distance<distance2) { distance2 = distance; Ids[master] = geom_0(i); } } Ids[slave] = SlaveNode; // double max = (*std::max_element(Distance.begin(), Distance.end() ) ); // double min = (*std::min_element(Distance.begin(), Distance.end() ) ); // double ratio = std::fabs(min/max); // if(ratio < 1E-8) // return yes_near; // return no_near; } bool CreateLinkingConditions( const unsigned int& Id, const unsigned int& master, const unsigned int& slave, const array_1d<NodePointerType, 2 >& Ids, const ContactPairType& it_pair, const PropertiesType::Pointer& tempProperties, Exist_Node& Exist, ConditionsArrayType& LinkingConditions ) { if(mrdimension==2) return CreateLinkingConditions2D(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions); else CreateLinkingConditions3D(Id, master, slave, Ids, it_pair, tempProperties, Exist, LinkingConditions); return false; } //***************************************************************************************************** //***************************************************************************************************** bool CreateLinkingConditions2D( const unsigned int& Id, const unsigned int& master, const unsigned int& slave, const array_1d<NodePointerType, 2 >& Ids, const ContactPairType& it_pair, const PropertiesType::Pointer& tempProperties, Exist_Node& Exist, ConditionsArrayType& LinkingConditions ) { KRATOS_TRY Condition::Pointer MasterFace; array_1d<double,3 > Normal_r; array_1d<double,3 > GL; ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); if(Exist==yes_nodes) { const bool exist_segment = LocateMasterSegment( Ids[slave], Ids[master], (it_pair)[master], (it_pair)[slave], MasterFace, Exist); if(exist_segment==true) { const double zero = 1.00E-6; Condition::GeometryType& geom = MasterFace->GetGeometry(); array_1d<double, 3>& point_slave = Ids[slave]->Coordinates(); array_1d<double, 3>& point_left = geom.GetPoint(0); 
array_1d<double,3> seg = geom.GetPoint(0)-geom.GetPoint(1); noalias(GL) = point_slave - point_left; MasterFace->Calculate(NORMAL, Normal_r, CurrentProcessInfo); //const double distance = norm_2(seg); //const double gat = inner_prod(GL, (1.00/distance)*seg) ; const double gap = inner_prod(GL, Normal_r); bool is_repited = bool(Ids[slave]->GetValue(IS_CONTACT_SLAVE)==0); if(gap<zero && is_repited==true) // && gat>zero && gat<distance) { Ids[slave]->GetValue(IS_CONTACT_SLAVE) = 1; Point2D<Node<3> >::Pointer point_geom = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[slave])); Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPointType(Id, point_geom) ); Condition::GeometryType& Mgeom = MasterFace->GetGeometry(); Condition::GeometryType& Sgeom = SlaveNode->GetGeometry(); //std::cout<<"Master = "<< (it_pair)[master]->Id() <<" Slave = " << (it_pair)[slave]->Id() <<std::endl; //std::cout<<" Node (Y) = " << Ids[slave]->Id() << " Master Face = " << MasterFace->Id() << std::endl; Triangle2D3<Node<3> >::Pointer Lgeom = Triangle2D3<Node<3> >::Pointer( new Triangle2D3<Node<3> >( Sgeom(0), Mgeom(0), Mgeom(1) ) ); Condition::Pointer newLink = Condition::Pointer( new PointSegmentContactLink(Id, Lgeom, tempProperties, MasterFace, SlaveNode)); LinkingConditions.push_back( newLink ); return exist_segment; } } } else if(Exist==no_nodes) { //KRATOS_WATCH(Ids[slave]->Id()) Point2D<Node<3> >::Pointer point_geom = Point2D<Node<3> >::Pointer( new Point2D<Node<3> >(Ids[slave])); Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPointType(Id, point_geom) ); //KRATOS_WATCH((it_pair)[master]->Id()) //KRATOS_WATCH((it_pair)[master]->GetValue(NEIGHBOUR_CONDITIONS).size()) MasterFace = ((it_pair)[master]->GetValue(NEIGHBOUR_CONDITIONS)(0)).lock(); //std::cout<<" Master = " << (it_pair)[master]->Id() <<" Slave = " << (it_pair)[slave]->Id() <<std::endl; //std::cout<<" Node (N) = " << Ids[slave]->Id() << " Master Face = " << MasterFace->Id() << std::endl; 
            Condition::GeometryType& Mgeom = MasterFace->GetGeometry();
            Condition::GeometryType& Sgeom = SlaveNode->GetGeometry();
            // Degenerate triangle geometry: slave point plus the two master
            // segment nodes carries the point-segment link condition.
            Triangle2D3<Node<3> >::Pointer Lgeom = Triangle2D3<Node<3> >::Pointer( new Triangle2D3<Node<3> >( Sgeom(0), Mgeom(0), Mgeom(1) ) );
            Condition::Pointer newLink = Condition::Pointer( new PointSegmentContactLink(Id,
                                         Lgeom,
                                         tempProperties,
                                         MasterFace,
                                         SlaveNode));
            LinkingConditions.push_back( newLink );
            return true;
        }
        else
            return false;
        return false;
        KRATOS_CATCH("")
    }

//*****************************************************************************************************
//*****************************************************************************************************
// 3D linking condition: a slave contact point and the cached master face,
// joined by a 4-node tetrahedron geometry inside a ContactLink3DExplicit.
void CreateLinkingConditions3D(
    const unsigned int& Id,
    const unsigned int& master,
    const unsigned int& slave,
    const array_1d<NodePointerType, 2 >& Ids,
    const ContactPairType& it_pair,
    const PropertiesType::Pointer& tempProperties,
    Exist_Node& Exist,
    ConditionsArrayType& LinkingConditions
)
{
    KRATOS_TRY
    //bool is_repited = bool(Ids[slave]->GetValue(IS_CONTACT_SLAVE)==0)
    //if(is_repited==true)
    {
        Point MasterContactLocalPoint;
        Point SlaveContactLocalPoint;
        int SlaveIntegrationPointIndex = 0;
        // The master face was cached on the slave node by the local search.
        Condition::Pointer MasterFace = (Ids[slave])->GetValue(CONTACT_LINK_MASTER);
        Point3D<Node<3> >::Pointer point_geom = Point3D<Node<3> >::Pointer( new Point3D<Node<3> >(Ids[slave]));
        Condition::Pointer SlaveNode = Condition::Pointer(new SlaveContactPoint3D(Id, point_geom) );
        Condition::GeometryType& Mgeom = MasterFace->GetGeometry();
        Condition::GeometryType& Sgeom = SlaveNode->GetGeometry();
        Tetrahedra3D4<Node<3> >::Pointer Lgeom = Tetrahedra3D4<Node<3> >::Pointer( new Tetrahedra3D4<Node<3> >( Sgeom(0), Mgeom(0), Mgeom(1), Mgeom(2) ) );
        Condition::Pointer newLink = Condition::Pointer( new ContactLink3DExplicit( Id,
                                     Lgeom,
                                     tempProperties,
                                     MasterFace,
                                     SlaveNode,
                                     MasterContactLocalPoint,
                                     SlaveContactLocalPoint,
                                     SlaveIntegrationPointIndex ));
        LinkingConditions.push_back( newLink );
    }
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************

/// Selects the master boundary segment for a slave node (2D case).
/// Runs a cascade of geometric heuristics (Test_One_* / Test_Two / Test_Three /
/// Test_Four) whose order matters; which branch is taken depends on whether the
/// contact pair shares nodes (Exist) and on how many boundary conditions the
/// master element owns.
/// @return true when MasterFace was set.
bool LocateMasterSegment( const NodePointerType& SlaveNode,
                          const NodePointerType& MasterNode, //the most near
                          const PointerType& MasterObject,
                          const PointerType& SlaveObject,
                          Condition::Pointer& MasterFace,
                          Exist_Node& Exist )
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
    if(neighb_cond.size()!=0)
    {
        switch(Exist)
        {
        case(yes_nodes):
        {
            if(neighb_cond.size()==1)
            {
                Condition::Pointer rCond_2;
                // problem: interior elements do not carry a master face
                Condition::Pointer& rCond_1 = SlaveNode->GetValue(CONTACT_LINK_MASTER);
                //KRATOS_WATCH(SlaveNode->Id())
                //KRATOS_WATCH(rCond_1->Id())
                const bool test_2 = Test_Four(SlaveNode, MasterObject, rCond_2);
                //KRATOS_WATCH(MasterObject->Id())
                //KRATOS_WATCH(SlaveNode->Id())
                //KRATOS_WATCH(rCond_1->Id())
                const bool test_3 = bool(neighb_cond(0).lock()->Id()==rCond_1->Id());
                //KRATOS_WATCH("-----------------")
                if(test_2==true)
                {
                    // Prefer the previously linked face when both tests agree.
                    if(rCond_1->Id()==rCond_2->Id())
                        MasterFace = rCond_1;
                    else
                        MasterFace = rCond_2; ///WARNING
                }
                else if( test_3==true)
                {
                    MasterFace = rCond_1;
                }
                else if(neighb_cond.size()!=0)
                    MasterFace = neighb_cond(0).lock();
                return true;
            }
            if(neighb_cond.size()>=2)
            {
                Condition::Pointer& rCond_1 = SlaveNode->GetValue(CONTACT_LINK_MASTER);
                // Master contour corner: pick the segment closest to the node.
                const bool is_corner = Is_Corner(SlaveNode, MasterObject);
                if(is_corner==true)
                {
                    Test_One_B_Distances(SlaveNode, MasterObject, rCond_1);
                    MasterFace = rCond_1;
                    return true;
                }
                const bool node_corner = Is_Node_Corner(SlaveNode, SlaveObject);
                if(node_corner==true)
                {
                    Condition::Pointer rCond_2;
                    Condition::Pointer rCond_3;
                    Condition::Pointer rCond_4;
                    const bool test_1 = Test_One_C (SlaveNode, MasterNode, rCond_2); //true==1
                    if(test_1==true)
                    {
                        if(rCond_2->Id()==rCond_1->Id())
                            MasterFace = rCond_1;
                        else
                            MasterFace = rCond_2; ///WARNING
                        return true;
                    }
                    //if(SlaveNode->Id()==29)
                    //std::cout<<"Test 1 " <<std::endl;
                    const bool test_2 = Test_Three (SlaveNode, MasterObject, rCond_3);
                    if(test_2==true)
                    {
                        if(rCond_3->Id()==rCond_1->Id())
                            MasterFace = rCond_1;
                        else
                            MasterFace = rCond_3;
                        return true;
                    }
                    //if(SlaveNode->Id()==29)
                    //std::cout<<"Test 2 " <<std::endl;
                    /*
                    if(SlaveNode->Id()==51)
                    std::cout<<"Test 3 " <<std::endl;
                    const bool test_3 = Test_Five (SlaveNode, MasterObject, rCond_4);
                    if(test_3==true){
                    if(rCond_4->Id()==rCond_1->Id())
                    MasterFace = rCond_1;
                    else
                    MasterFace = rCond_4; ///WARNING rCond_3;
                    return true;
                    }
                    */
                }
                //std::cout<<" No test " <<std::endl;
                // Fallback: nearest segment by point-to-segment distance.
                Condition::Pointer rCond_2;
                const bool test_4 = Test_One_B_Distances(SlaveNode, MasterObject, rCond_2);
                if(test_4==true)
                {
                    if(rCond_2->Id()==rCond_1->Id())
                        MasterFace = rCond_1;
                    else
                        MasterFace = rCond_2;
                    return true;
                }
                MasterFace = rCond_1;
                return true;
            }
            break;
        }
        case(no_nodes):
        {
            // (historical variants kept for reference)
            //Condition::Pointer& rCond_1 = SlaveNode->GetValue(CONTACT_LINK_MASTER);
            //Condition::Pointer rCond_2;
            //Condition::Pointer rCond_3;
            //Condition::Pointer rCond_4;
            //const bool test_1 = Test_One_C(SlaveNode, MasterNode, rCond_2);
            //const bool test_2 = Test_Two(SlaveNode, MasterObject, MasterFace)
            //const bool test_3 = Test_Four(SlaveNode, MasterObject, MasterFace);
            //const bool test_4 = Test_One_A(SlaveNode, MasterNode, MasterFace);
            // if(test_1==true){
            //    if(rCond_2->Id()==rCond_1->Id())
            //       MasterFace = rCond_1;
            //    else
            //       MasterFace = rCond_2;
            //    return true;
            // }
            if(Test_One_C(SlaveNode, MasterNode, MasterFace))
            {
                return true;
            }
            if(Test_Two(SlaveNode, MasterObject, MasterFace))
            {
                return true;
            }
            if(Test_Four(SlaveNode, MasterObject, MasterFace) )
            {
                return true;
            }
            if(Test_One_A(SlaveNode, MasterNode, MasterFace))
            {
                return true;
            }
            // Nothing matched: keep the previously linked master face.
            MasterFace = SlaveNode->GetValue(CONTACT_LINK_MASTER);
            return true;
            break;
        }
        }
        return false;
    }
    // was not here originally: if this misbehaves, comment it out and return false
    MasterFace = SlaveNode->GetValue(CONTACT_LINK_MASTER);
    return true; // false
    KRATOS_CATCH("")
}

// If the node lies inside an element (original note).
/// Returns true when the slave node's own boundary segments intersect two or
/// more distinct boundary segments of the master element, i.e. the node sits
/// over a corner of the master contour.
bool Is_Corner( const NodePointerType& SlaveNode,
                const PointerType& MasterObject)
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond_master = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
    WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS);
    std::vector<unsigned int> segment;          // indices of intersected master segments (unique)
    std::vector<unsigned int>::iterator it;
    vector<array_1d<double, 2> > Points0;       // current slave segment end points
    vector<array_1d<double, 2> > Points1;       // current master segment end points
    array_1d<double, 2> Point;                  // intersection point (output of IntersectSegment)
    unsigned int I = 0;
    unsigned int II = 1;
    unsigned int III = 1;
    Points0.resize(2, false);
    Points1.resize(2, false);
    for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin();
            cond_slave!= neighb_cond_slave.end(); ++cond_slave)
    {
        Condition::GeometryType& geom = cond_slave->GetGeometry();
        Point[0] = 0.00;
        Point[1] = 0.00;
        Points0(0)[0] = geom[0].X();
        Points0(0)[1] = geom[0].Y();
        Points0(1)[0] = geom[1].X();
        Points0(1)[1] = geom[1].Y();
        I = 0;
        III = 1;
        for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin();
                cond!= neighb_cond_master.end(); ++cond)
        {
            Condition::GeometryType& geom_3 = cond->GetGeometry();
            Points1(0)[0] = geom_3[0].X();
            Points1(0)[1] = geom_3[0].Y();
            Points1(1)[0] = geom_3[1].X();
            Points1(1)[1] = geom_3[1].Y();
            if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY)
            {
                // Record each intersected master segment only once.
                if(segment.size()==0)
                {
                    segment.push_back(I);
                }
                else
                {
                    it = std::find(segment.begin(), segment.end(), I);
                    if(it==segment.end())
                        segment.push_back(I);
                }
            }
            I++;
            III++;
            // Counter guard against the weak-pointer iterator overrunning.
            if(III>neighb_cond_master.size()) break;
        }
        II++;
        if(II>neighb_cond_slave.size()) break;
    }
    if(segment.size()>=2)
        return true;
    return false;
    KRATOS_CATCH("")
}

//************************************************************************************
//************************************************************************************

/// Checks whether a node is a corner node (original: "verifica si un nodo es corner").
bool Is_Node_Corner(const NodePointerType& SlaveNode, const
PointerType& SlaveObject ) { KRATOS_TRY WeakPointerVector<Condition>& neighb_cond_node = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS); WeakPointerVector<Condition>& neighb_cond_object = SlaveObject->GetValue(NEIGHBOUR_CONDITIONS); unsigned int count = 0; ///WARNING = Verificar para tres lados if(neighb_cond_object.size()>=2) for(unsigned int i = 0; i<neighb_cond_node.size(); i++) for(unsigned int j = 0; j<neighb_cond_object.size(); j++) if((neighb_cond_node(i).lock())->Id()==(neighb_cond_object(j).lock())->Id()) count++; if(count==2) return true; return false; KRATOS_CATCH("") } //************************************************************************************ //************************************************************************************ /// comparacion con desplazamientos bool Test_One_A( const NodePointerType& SlaveNode, const NodePointerType& MasterNode, Condition::Pointer& rCond) { KRATOS_TRY WeakPointerVector<Condition>& neighb_cond_master = MasterNode->GetValue(NEIGHBOUR_CONDITIONS); if(neighb_cond_master.size()!=0) { std::vector<unsigned int> segment; unsigned int I = 0; unsigned int segmento = 0; std::vector<array_1d<double, 2> > Points; // punto de interseccion del segmento vector<array_1d<double, 2> > Points0; vector<array_1d<double, 2> > Points1; array_1d<double, 2> Point; array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3); Points0.resize(2, false); Points1.resize(2, false); Points0(0)[0] = SlaveNode->X0() + old_pos[0]; Points0(0)[1] = SlaveNode->Y0() + old_pos[1]; Points0(1)[0] = SlaveNode->X(); Points0(1)[1] = SlaveNode->Y(); unsigned int JJ = 1; for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin(); cond!= neighb_cond_master.end(); cond++) { Condition::GeometryType& geom_2 = cond->GetGeometry(); Points1(0)[0] = geom_2[0].X(); Points1(0)[1] = geom_2[0].Y(); Points1(1)[0] = geom_2[1].X(); Points1(1)[1] = geom_2[1].Y(); if(IntersectionSegments::IntersectSegment(Point, Points0, 
Points1)==IT_POINT) { Points.push_back(Point); segment.push_back(I); } I++; JJ++; if(JJ>neighb_cond_master.size()) break; } if (Points.size()!=0) { if (Points.size()==1) { segmento = segment[0]; } else if (Points.size()>1) { double dist0 = 0; array_1d<double, 2> rect; std::vector<double> Distance; std::vector<double>::iterator it; int position = 0; for(unsigned int i = 0; i<Points.size(); i++) { rect = Points0[1] - Points[i]; dist0 = std::sqrt(inner_prod(rect, rect )); Distance.push_back(dist0); } const double max = (*std::max_element(Distance.begin(), Distance.end() ) ); it = std::find(Distance.begin(), Distance.end(), max); position = int(it-Distance.begin()); segmento = segment[position]; } rCond = neighb_cond_master(segmento).lock(); return true; } } return false; KRATOS_CATCH("") } bool Test_One_C( const NodePointerType& SlaveNode, const NodePointerType& MasterNode, Condition::Pointer& rCond) { KRATOS_TRY WeakPointerVector<Condition>& neighb_cond = MasterNode->GetValue(NEIGHBOUR_CONDITIONS); WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS); if(neighb_cond.size()!=0 && neighb_cond_slave.size()!=0) { std::vector<unsigned int> segment; std::vector<array_1d<double, 2> > Points; // punto de interseccion del segmento vector<array_1d<double, 2> > Points0; vector<array_1d<double, 2> > Points1; array_1d<double, 2> Point; // test with edges unsigned int segmento = 0; unsigned int I = 0; unsigned int II = 1; unsigned int III = 1; Points0.resize(2, false); Points1.resize(2, false); for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond.end(); ++cond_slave) { Condition::GeometryType& geom = cond_slave->GetGeometry(); Point[0] = 0.00; Point[1] = 0.00; Points0(0)[0] = geom[0].X(); Points0(0)[1] = geom[0].Y(); Points0(1)[0] = geom[1].X(); Points0(1)[1] = geom[1].Y(); I = 0; III = 1; for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); 
++cond) { Condition::GeometryType& geom_3 = cond->GetGeometry(); Points1(0)[0] = geom_3[0].X(); Points1(0)[1] = geom_3[0].Y(); Points1(1)[0] = geom_3[1].X(); Points1(1)[1] = geom_3[1].Y(); if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)==IT_POINT) { Points.push_back(Point); segment.push_back(I); } I++; III++; if(III>neighb_cond.size()) break; } II++; if(II>neighb_cond_slave.size()) break; } if (Points.size()!=0) { if (Points.size()==1) { segmento = segment[0]; } // en caso de que el nodo quede fuera e intersecte con dos aristas else if (Points.size()>1) { Points0(0)[0] = SlaveNode->X(); Points0(0)[1] = SlaveNode->Y(); Points0(1)[0] = SlaveNode->X(); Points0(1)[1] = SlaveNode->Y(); double dist0 = 0.00; array_1d<double, 2> rect; std::vector<double> Distance; std::vector<double>::iterator it; int position = 0; for(unsigned int i = 0; i<Points.size(); i++) { rect = Points0[1] - Points[i]; dist0 = std::sqrt(inner_prod(rect, rect )); Distance.push_back(dist0); } const double max = (*std::max_element(Distance.begin(), Distance.end() ) ); it = std::find(Distance.begin(), Distance.end(), max); position = int(it-Distance.begin()); segmento = segment[position]; } rCond = neighb_cond(segmento).lock(); return true; } } return false; KRATOS_CATCH("") } //***************************************************************************************************** //***************************************************************************************************** /// Busca la interseccion de la trayectoria del nodo esclavo /// desde su tiempo actual hasta 3 paso atras con las aristas del /// elemento master. La menor de todas es el segmento de contacto. 
/// Intersects the slave node's trajectory (old position three steps back ->
/// current position) with the boundary segments of the master NODE; when
/// several segments are hit, the one with the NEAREST intersection point wins.
/// @return true and sets rCond when at least one segment was intersected.
bool Test_One_B(const NodePointerType& SlaveNode,
                const NodePointerType& MasterNode,
                Condition::Pointer& rCond)
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond_master = MasterNode->GetValue(NEIGHBOUR_CONDITIONS);
    if(neighb_cond_master.size()!=0 )
    {
        std::vector<unsigned int> segment;
        unsigned int segmento = 0;
        unsigned int I = 0;
        std::vector<array_1d<double, 2> > Points; // intersection points with the segments
        vector<array_1d<double, 2> > Points0;     // slave trajectory segment
        vector<array_1d<double, 2> > Points1;     // current master boundary segment
        array_1d<double, 2> Point;
        // Old position reconstructed from the displacement three steps back.
        array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
        Points0.resize(2, false);
        Points1.resize(2, false);
        Points0(0)[0] = SlaveNode->X0() + old_pos[0];
        Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
        Points0(1)[0] = SlaveNode->X();
        Points0(1)[1] = SlaveNode->Y();
        /*
        array_1d<double, 2> Dir;
        Dir[0] = Points0(1)[0] - Points0(0)[0];
        Dir[1] = Points0(1)[1] - Points0(0)[1];
        noalias(Dir) = (1.00/(sqrt(inner_prod(Dir, Dir)))) * Dir;
        Points0(0)[0] -= Dir[0];
        Points0(0)[1] -= Dir[1];
        Points0(1)[0] += Dir[0];
        Points0(1)[1] += Dir[1];
        */
        unsigned int JJ = 1;
        for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin();
                cond!= neighb_cond_master.end(); cond++)
        {
            Condition::GeometryType& geom_2 = cond->GetGeometry();
            Points1(0)[0] = geom_2[0].X();
            Points1(0)[1] = geom_2[0].Y();
            Points1(1)[0] = geom_2[1].X();
            Points1(1)[1] = geom_2[1].Y();
            if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY)
            {
                Points.push_back(Point);
                segment.push_back(I);
            }
            I++;
            JJ++;
            if(JJ>neighb_cond_master.size()) break;
        }
        if (Points.size()!=0)
        {
            if (Points.size()==1)
            {
                segmento = segment[0];
            }
            else if (Points.size()>1)
            {
                // Several hits: keep the segment with the nearest intersection.
                double dist0 = 0.00;
                array_1d<double, 2> rect;
                std::vector<double> Distance;
                std::vector<double>::iterator it;
                int position = 0;
                for(unsigned int i = 0; i<Points.size(); i++)
                {
                    rect = Points0[1] - Points[i];
                    dist0 = std::sqrt(inner_prod(rect, rect ));
                    Distance.push_back(dist0);
                }
                const double min = (*std::min_element(Distance.begin(), Distance.end() ) );
                it = std::find(Distance.begin(), Distance.end(), min);
                position = int(it-Distance.begin());
                segmento = segment[position];
            }
            rCond = neighb_cond_master( segmento).lock();
            return true;
        }
    }
    return false;
    KRATOS_CATCH("")
}

// Computing distances from points to segments (original note).
/// Picks the master boundary segment with the smallest point-to-segment
/// distance to the slave node's current position.
/// @return true (and sets rCond) whenever the master has boundary conditions.
bool Test_One_B_Distances( const NodePointerType& SlaveNode,
                           const PointerType& MasterObject,
                           Condition::Pointer& rCond)
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond_master = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
    std::vector<unsigned int> segment;
    unsigned int I = 0;
    unsigned int segmento = 0;
    std::vector<double> Distances; // distance of the node to each segment
    array_1d<double, 2> Points0;   // slave node position
    vector<array_1d<double, 2> > Points1;
    Points1.resize(2, false);
    Points0[0] = SlaveNode->X();
    Points0[1] = SlaveNode->Y();
    Segment2D Segment1;
    unsigned int JJ = 1;
    for(WeakPointerVector< Condition >::iterator cond = neighb_cond_master.begin();
            cond!= neighb_cond_master.end(); cond++)
    {
        Condition::GeometryType& geom_2 = cond->GetGeometry();
        Points1(0)[0] = geom_2[0].X();
        Points1(0)[1] = geom_2[0].Y();
        Points1(1)[0] = geom_2[1].X();
        Points1(1)[1] = geom_2[1].Y();
        Segment1.AssignPointsAndComputeParameters(Points1[0], Points1[1]);
        Distances.push_back(Segment1.DistPoint2Segment2D(Points0));
        segment.push_back(I);
        I++;
        JJ++;
        if(JJ>neighb_cond_master.size()) break;
    }
    if (Distances.size()!=0)
    {
        if (Distances.size()==1)
        {
            segmento = segment[0];
        }
        else if (Distances.size()>1)
        {
            // Keep the segment with the minimum distance.
            std::vector<double>::iterator it;
            int position = 0;
            const double min = (*std::min_element(Distances.begin(), Distances.end() ) );
            it = std::find(Distances.begin(), Distances.end(), min);
            position = int(it-Distances.begin());
            segmento = segment[position];
        }
        rCond = neighb_cond_master( segmento).lock();
        return true;
    }
    return false;
    KRATOS_CATCH("")
}

/// For nearby objects: case where the edges are outside. Intersects the slave
/// trajectory with the boundary segments of the master ELEMENT and, on multiple
/// hits, keeps the FARTHEST intersection point.
bool Test_Two( const NodePointerType& SlaveNode,
               const PointerType& MasterObject,
               Condition::Pointer& rCond)
{
    KRATOS_TRY
    WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS);
    if(neighb_cond.size()!=0 )
    {
        std::vector<unsigned int> segment;
        unsigned int I = 0;
        unsigned int segmento = 0;
        std::vector<array_1d<double, 2> > Points; // intersection points with the segments
        vector<array_1d<double, 2> > Points0;
        vector<array_1d<double, 2> > Points1;
        array_1d<double, 2> Point;
        array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3);
        Points0.resize(2, false);
        Points1.resize(2, false);
        Points0(0)[0] = SlaveNode->X0() + old_pos[0];
        Points0(0)[1] = SlaveNode->Y0() + old_pos[1];
        Points0(1)[0] = SlaveNode->X();
        Points0(1)[1] = SlaveNode->Y();
        unsigned int JJ = 1;
        for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin();
                cond!= neighb_cond.end(); cond++)
        {
            Condition::GeometryType& geom_2 = cond->GetGeometry();
            Points1(0)[0] = geom_2[0].X();
            Points1(0)[1] = geom_2[0].Y();
            Points1(1)[0] = geom_2[1].X();
            Points1(1)[1] = geom_2[1].Y();
            if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)==IT_POINT)
            {
                Points.push_back(Point);
                segment.push_back(I);
            }
            I++;
            JJ++;
            if(JJ>neighb_cond.size()) break;
        }
        if (Points.size()!=0)
        {
            if (Points.size()==1)
            {
                segmento = segment[0];
            }
            else if (Points.size()>1)
            {
                double dist0 = 0.00;
                array_1d<double, 2> rect;
                std::vector<double> Distance;
                std::vector<double>::iterator it;
                int position = 0;
                for(unsigned int i = 0; i<Points.size(); i++)
                {
                    rect = Points0[1] - Points[i];
                    dist0 = std::sqrt(inner_prod(rect, rect ));
                    Distance.push_back(dist0);
                }
                const double max = (*std::max_element(Distance.begin(), Distance.end() ) );
                it = std::find(Distance.begin(), Distance.end(), max);
                position = int(it-Distance.begin());
                segmento = segment[position];
            }
            rCond = neighb_cond(segmento).lock();
            return true;
        }
    }
    return false;
    KRATOS_CATCH("")
}

/// Finds the intersection of the slave node's trajectory
/// with the edges of the elements.
A Diferencia del test_One_B este lo hace con los elementos; no con su nodo master. bool Test_Three(const NodePointerType& SlaveNode, const PointerType& MasterObject, Condition::Pointer& rCond) { KRATOS_TRY WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS); if(neighb_cond.size()!=0 ) { std::vector<unsigned int> segment; unsigned int I = 0; unsigned int segmento = 0; std::vector<array_1d<double, 2> > Points; // punto de interseccion del segmento vector<array_1d<double, 2> > Points0; vector<array_1d<double, 2> > Points1; array_1d<double, 2> Point; array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3); Points0.resize(2, false); Points1.resize(2, false); Points0(0)[0] = SlaveNode->X0() + old_pos[0]; Points0(0)[1] = SlaveNode->Y0() + old_pos[1]; Points0(1)[0] = SlaveNode->X(); Points0(1)[1] = SlaveNode->Y(); unsigned int JJ = 1; for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); cond++) { Condition::GeometryType& geom_2 = cond->GetGeometry(); Points1(0)[0] = geom_2[0].X(); Points1(0)[1] = geom_2[0].Y(); Points1(1)[0] = geom_2[1].X(); Points1(1)[1] = geom_2[1].Y(); if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY) { Points.push_back(Point); segment.push_back(I); } I++; JJ++; if(JJ>neighb_cond.size()) break; } if (Points.size()!=0) { if (Points.size()==1) { segmento = segment[0]; } else if (Points.size()>1) { double dist0 = 0.00; array_1d<double, 2> rect; std::vector<double> Distance; std::vector<double>::iterator it; int position = 0; for(unsigned int i = 0; i<Points.size(); i++) { rect = Points0[1] - Points[i]; dist0 = std::sqrt(inner_prod(rect, rect )); Distance.push_back(dist0); } const double min = (*std::min_element(Distance.begin(), Distance.end() ) ); it = std::find(Distance.begin(), Distance.end(), min); position = int(it-Distance.begin()); segmento = segment[position]; } rCond = neighb_cond(segmento).lock(); return true; } 
} return false; KRATOS_CATCH("") } /// para cercanos /// caso en que las aristas esten fuera de un elemento bool Test_Four( const NodePointerType& SlaveNode, const PointerType& MasterObject, Condition::Pointer& rCond) { KRATOS_TRY WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS); WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS); if(neighb_cond.size()!=0 && neighb_cond_slave.size()!=0) { array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3); std::vector<unsigned int> segment; std::vector<array_1d<double, 2> > Points; // punto de interseccion del segmento vector<array_1d<double, 2> > Points0; vector<array_1d<double, 2> > Points1; array_1d<double, 2> Point; // test with edges unsigned int segmento = 0; unsigned int I = 0; unsigned int II = 1; unsigned int III = 1; Points0.resize(2, false); Points1.resize(2, false); for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond.end(); ++cond_slave) { Condition::GeometryType& geom = cond_slave->GetGeometry(); Point[0] = 0.00; Point[1] = 0.00; Points0(0)[0] = geom[0].X(); Points0(0)[1] = geom[0].Y(); Points0(1)[0] = geom[1].X(); Points0(1)[1] = geom[1].Y(); I = 0; III = 1; for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); ++cond) { Condition::GeometryType& geom_3 = cond->GetGeometry(); Points1(0)[0] = geom_3[0].X(); Points1(0)[1] = geom_3[0].Y(); Points1(1)[0] = geom_3[1].X(); Points1(1)[1] = geom_3[1].Y(); if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)==IT_POINT) { Points.push_back(Point); segment.push_back(I); } I++; III++; if(III>neighb_cond.size()) break; } II++; if(II>neighb_cond_slave.size()) break; } if (Points.size()!=0) { if (Points.size()==1) { segmento = segment[0]; } // en caso de que el nodo quede fuera e intersecte con dos aristas else if (Points.size()>1) { Points0(0)[0] = 
SlaveNode->X0() + old_pos[0]; Points0(0)[1] = SlaveNode->Y0() + old_pos[1]; Points0(1)[0] = SlaveNode->X(); Points0(1)[1] = SlaveNode->Y(); double dist0 = 0.00; array_1d<double, 2> rect; std::vector<double> Distance; std::vector<double>::iterator it; int position = 0; for(unsigned int i = 0; i<Points.size(); i++) { rect = Points0[1] - Points[i]; dist0 = std::sqrt(inner_prod(rect, rect )); Distance.push_back(dist0); } const double max = (*std::max_element(Distance.begin(), Distance.end() ) ); it = std::find(Distance.begin(), Distance.end(), max); position = int(it-Distance.begin()); segmento = segment[position]; } rCond = neighb_cond(segmento).lock(); return true; } } return false; KRATOS_CATCH("") } /// Buscla interseccion de las aristas del nodos slave con las aristas del nodo master bool Test_Five( const NodePointerType& SlaveNode, const PointerType& MasterObject, Condition::Pointer& rCond) { KRATOS_TRY WeakPointerVector<Condition>& neighb_cond = MasterObject->GetValue(NEIGHBOUR_CONDITIONS); WeakPointerVector<Condition>& neighb_cond_slave = SlaveNode->GetValue(NEIGHBOUR_CONDITIONS); if(neighb_cond.size()!=0 && neighb_cond_slave.size()!=0) { array_1d<double,3>& old_pos = SlaveNode->FastGetSolutionStepValue(DISPLACEMENT,3); std::vector<unsigned int> segment; std::vector<array_1d<double, 2> > Points; // punto de interseccion del segmento vector<array_1d<double, 2> > Points0; vector<array_1d<double, 2> > Points1; array_1d<double, 2> Point; // test with edges unsigned int segmento = 0; unsigned int I = 0; unsigned int II = 1; unsigned int III = 1; Points0.resize(2, false); Points1.resize(2, false); for(WeakPointerVector<Condition>::iterator cond_slave = neighb_cond_slave.begin(); cond_slave!= neighb_cond.end(); ++cond_slave) { Condition::GeometryType& geom = cond_slave->GetGeometry(); Point[0] = 0.00; Point[1] = 0.00; Points0(0)[0] = geom[0].X(); Points0(0)[1] = geom[0].Y(); Points0(1)[0] = geom[1].X(); Points0(1)[1] = geom[1].Y(); I = 0; III = 1; 
for(WeakPointerVector< Condition >::iterator cond = neighb_cond.begin(); cond!= neighb_cond.end(); ++cond) { Condition::GeometryType& geom_3 = cond->GetGeometry(); Points1(0)[0] = geom_3[0].X(); Points1(0)[1] = geom_3[0].Y(); Points1(1)[0] = geom_3[1].X(); Points1(1)[1] = geom_3[1].Y(); if(IntersectionSegments::IntersectSegment(Point, Points0, Points1)!=IT_EMPTY) { Points.push_back(Point); segment.push_back(I); } I++; III++; if(III>neighb_cond.size()) break; } II++; if(II>neighb_cond_slave.size()) break; } if (Points.size()!=0) { if (Points.size()==1) { segmento = segment[0]; } // en caso de que el nodo quede fuera e intersecte con dos aristas else if (Points.size()>1) { Points0(0)[0] = SlaveNode->X0() + old_pos[0]; Points0(0)[1] = SlaveNode->Y0() + old_pos[1]; Points0(1)[0] = SlaveNode->X(); Points0(1)[1] = SlaveNode->Y(); double dist0 = 0.00; array_1d<double, 2> rect; std::vector<double> Distance; std::vector<double>::iterator it; int position = 0; for(unsigned int i = 0; i<Points.size(); i++) { rect = Points0[1] - Points[i]; dist0 = std::sqrt(inner_prod(rect, rect )); Distance.push_back(dist0); } const double min = (*std::min_element(Distance.begin(), Distance.end() ) ); it = std::find(Distance.begin(), Distance.end(), min); position = int(it-Distance.begin()); segmento = segment[position]; } rCond = neighb_cond(segmento).lock(); return true; } } return false; KRATOS_CATCH("") } //************************************************************************************ //************************************************************************************ void CalculateBoundaryContour2D(ConditionsArrayType& MasterConditions) { KRATOS_TRY //std::cout<< std::endl; std::cout<<" CALCULATING CONTOURS 2D" << std::endl; typedef WeakPointerVector< Element >::iterator ElementIteratorType; ContainerType& rElements = mr_model_part.ElementsArray(); ConditionsArrayType& rConditions = mr_model_part.Conditions(); IteratorType it_begin = rElements.begin(); IteratorType it_end = 
rElements.end();
array_1d<NodePointerType,2> Pair;     // the two nodes of the current boundary edge
unsigned int face = 0;                // local face index inside the neighbour loop
unsigned int Id = rConditions.size() + 1 ;
bool is_repited = false;              // element already registered as boundary?
for(IteratorType elem = it_begin; elem!=it_end; elem++)
{
    Element::GeometryType& geom_1 = (*elem)->GetGeometry();
    WeakPointerVector< Element >& neighb_elems = (*elem)->GetValue(NEIGHBOUR_ELEMENTS);
    //WeakPointerVector< Condition >& neighb_cond = (*elem)->GetValue(NEIGHBOUR_CONDITIONS);
    //neighb_cond.clear(); ///WARNING
    //node_boundary.resize(neighb_elems.size(), false);
    // The element may be listed as its own neighbour when it has fewer than
    // 3 neighbouring elements — that self-reference marks a boundary face.
    // check if the element is repeated
    // ElementIteratorType does not require the explicit *
    for( ElementIteratorType neighb_elem = neighb_elems.begin();
            neighb_elem!= neighb_elems.end(); neighb_elem++)
    {
        if (neighb_elem->Id() == (*elem)->Id() )
        {
            if(face == 0) // edge 1-2
            {
                Pair[0] = geom_1(1);
                Pair[1] = geom_1(2);
                CreateMasterConditions2D(Pair, elem, Id, MasterConditions);
                geom_1[1].GetValue(IS_BOUNDARY) = 1; //FastGetSolutionStepValue(IS_BOUNDARY) = 1.00;
                geom_1[2].GetValue(IS_BOUNDARY) = 1; //FastGetSolutionStepValue(IS_BOUNDARY) = 1.00;
                Id++;
            }
            if (face==1) // edge 2-0
            {
                Pair[0] = geom_1(2);
                Pair[1] = geom_1(0);
                CreateMasterConditions2D(Pair, elem, Id, MasterConditions);
                geom_1[2].GetValue(IS_BOUNDARY) = 1; //FastGetSolutionStepValue(IS_BOUNDARY) = 1.00;
                geom_1[0].GetValue(IS_BOUNDARY) = 1; //FastGetSolutionStepValue(IS_BOUNDARY) = 1.00;
                Id++;
            }
            if (face==2) // edge 0-1
            {
                Pair[0] = geom_1(0);
                Pair[1] = geom_1(1);
                CreateMasterConditions2D(Pair, elem, Id, MasterConditions);
                geom_1[0].GetValue(IS_BOUNDARY) = 1; //FastGetSolutionStepValue(IS_BOUNDARY) = 1.00;
                geom_1[1].GetValue(IS_BOUNDARY) = 1; // FastGetSolutionStepValue(IS_BOUNDARY) = 1.00;
                Id++;
            }
            // Register the element as a boundary element only once.
            if(is_repited==false)
            {
                (*elem)->GetValue(IS_BOUNDARY) = 1;
                mBoundaryElements.push_back(*elem);
                is_repited = true;
            }
        }
        face++;
    }
    face = 0;
    is_repited = false;
}
// Second pass: collect boundary nodes and promote every element touching a
// boundary node to a boundary element as well.
unsigned int I = 0;
NodesContainerType& rNodes = mr_model_part.NodesArray();
for(NodesIteratorType inode = rNodes.begin(); inode!=rNodes.end(); ++inode)
{
    if((*inode)->GetValue(IS_BOUNDARY) == 1)
    {
        mBoundaryNodes.push_back(*inode);
        WeakPointerVector<Element>& neighb_elems = (*inode)->GetValue(NEIGHBOUR_ELEMENTS);
        I = 0;
        for( ElementIteratorType neighb_elem = neighb_elems.begin();
                neighb_elem!= neighb_elems.end(); ++neighb_elem)
        {
            if(neighb_elem->GetValue(IS_BOUNDARY)!=1)
            {
                neighb_elem->GetValue(IS_BOUNDARY)=1;
                mBoundaryElements.push_back(neighb_elems(I).lock());
            }
            I++;
        }
    }
}
KRATOS_CATCH("")
}

//*****************************************************************************************************
//*****************************************************************************************************

/// Builds the boundary contour of the 3D (tetrahedral) mesh: a master contact
/// face is created for every element face whose neighbour list points back to
/// the element itself, and the involved nodes/elements are flagged IS_BOUNDARY.
/// NOTE(review): unlike the 2D variant, the node loop here does not push nodes
/// into mBoundaryNodes — confirm whether that is intentional.
void CalculateBoundaryContour3D(ConditionsArrayType& MasterConditions)
{
    KRATOS_TRY
    //std::cout<< std::endl;
    std::cout<<"CALCULATING CONTOURS 3D"<< std::endl;
    typedef WeakPointerVector< Element >::iterator ElementIteratorType;
    ContainerType& rElements = mr_model_part.ElementsArray();
    ConditionsArrayType& rConditions = mr_model_part.Conditions();
    IteratorType it_begin = rElements.begin();
    IteratorType it_end = rElements.end();
    array_1d<NodePointerType,3> Pair;  // the three nodes of the current boundary face
    unsigned int face = 0;
    unsigned int Id = rConditions.size() + 1 ;
    bool is_repited = false;
    for(IteratorType elem = it_begin; elem!=it_end; elem++)
    {
        Element::GeometryType& geom_1 = (*elem)->GetGeometry();
        WeakPointerVector< Element >& neighb_elems = (*elem)->GetValue(NEIGHBOUR_ELEMENTS);
        //WeakPointerVector< Condition >& neighb_cond = (*elem)->GetValue(NEIGHBOUR_CONDITIONS);
        for( ElementIteratorType neighb_elem = neighb_elems.begin();
                neighb_elem!= neighb_elems.end(); neighb_elem++)
        {
            if(neighb_elem->Id() == (*elem)->Id() )
            {
                if(face == 0) // face 1-2-3
                {
                    Pair[0] = geom_1(1);
                    Pair[1] = geom_1(2);
                    Pair[2] = geom_1(3);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    geom_1[3].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(face ==1) // face 0-3-2
                {
                    Pair[0] = geom_1(0);
                    Pair[1] = geom_1(3);
                    Pair[2] = geom_1(2);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    geom_1[3].GetValue(IS_BOUNDARY) = 1;
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(face == 2) // face 0-1-3
                {
                    Pair[0] = geom_1(0);
                    Pair[1] = geom_1(1);
                    Pair[2] = geom_1(3);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    geom_1[3].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(face == 3) // face 0-2-1
                {
                    Pair[0] = geom_1(0);
                    Pair[1] = geom_1(2);
                    Pair[2] = geom_1(1);
                    CreateMasterConditions3D(Pair, elem, Id, MasterConditions);
                    geom_1[0].GetValue(IS_BOUNDARY) = 1;
                    geom_1[2].GetValue(IS_BOUNDARY) = 1;
                    geom_1[1].GetValue(IS_BOUNDARY) = 1;
                    Id++;
                }
                if(is_repited==false)
                {
                    (*elem)->GetValue(IS_BOUNDARY) = 1;
                    mBoundaryElements.push_back(*elem);
                    is_repited = true;
                }
            }
            face++;
        }
        face = 0;
        is_repited = false;
    }
    // Promote every element touching a boundary node to a boundary element.
    unsigned int I = 0;
    NodesContainerType& rNodes = mr_model_part.NodesArray();
    for(NodesIteratorType inode = rNodes.begin(); inode!=rNodes.end(); ++inode)
    {
        if((*inode)->GetValue(IS_BOUNDARY) == 1)
        {
            WeakPointerVector<Element>& neighb_elems = (*inode)->GetValue(NEIGHBOUR_ELEMENTS);
            I = 0;
            for( ElementIteratorType neighb_elem = neighb_elems.begin();
                    neighb_elem!= neighb_elems.end(); ++neighb_elem)
            {
                if(neighb_elem->GetValue(IS_BOUNDARY)!=1)
                {
                    neighb_elem->GetValue(IS_BOUNDARY)=1;
                    mBoundaryElements.push_back(neighb_elems(I).lock());
                }
                I++;
            }
        }
    }
    KRATOS_CATCH("")
}

//*****************************************************************************************************
//*****************************************************************************************************

/// Creates a 2D master contact segment (Line2D2) for a boundary edge, wires it
/// into the neighbour lists of its nodes and element, and registers it both in
/// MasterConditions and in the model part's condition container.
void CreateMasterConditions2D(const array_1d<NodePointerType,2>& rPair,
                              const IteratorType& elem,
                              const unsigned int& Id,
                              ConditionsArrayType& MasterConditions)
{
    KRATOS_TRY
    Line2D2<Node<3> >::Pointer pgeom = Line2D2<Node<3> >::Pointer (new
Line2D2<Node<3> >( rPair[0], rPair[1] ) ) ; Condition::Pointer MasterSegment = Condition::Pointer(new MasterContactFaceType(Id, pgeom ) ) ; MasterSegment->GetValue(NEIGHBOUR_ELEMENTS).push_back(*(elem)); ((rPair)[0])->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSegment); ((rPair)[1])->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSegment); (*elem)->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSegment); MasterConditions.push_back(MasterSegment); ConditionsArrayType& pConditions = mr_model_part.Conditions(); pConditions.push_back(MasterSegment); KRATOS_CATCH("") } void CreateMasterConditions3D(const array_1d<NodePointerType,3>& rPair, const IteratorType& elem, const unsigned int& Id, ConditionsArrayType& MasterConditions) { KRATOS_TRY Triangle2D3<Node<3> >::Pointer pgeom = Triangle2D3<Node<3> >::Pointer( new Triangle2D3<Node<3> > ( rPair[0], rPair[1], rPair[2]) ); Condition::Pointer MasterSurface = Condition::Pointer(new MasterContactFace3D(Id, pgeom) ); MasterSurface->GetValue(NEIGHBOUR_ELEMENTS).push_back(*(elem)); MasterSurface->GetValue(IS_BOUNDARY) = 1; rPair[0]->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface); rPair[1]->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface); rPair[2]->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface); (*elem)->GetValue(NEIGHBOUR_CONDITIONS).push_back(MasterSurface); MasterConditions.push_back(MasterSurface); ConditionsArrayType& pConditions = mr_model_part.Conditions(); pConditions.push_back(MasterSurface); KRATOS_CATCH("") } //***************************************************************************************************** //***************************************************************************************************** void FiltratePairContacts2D(ContainerContactPair& PairContacts) { KRATOS_TRY ContainerContactPair temp; std::vector<unsigned int> id; for(IteratorContainerContactPair ipair = PairContacts.begin(); ipair!=PairContacts.end(); ipair++) { if(SearchCommonNode( (*ipair)[0], (*ipair)[1], 
id )) { /// Se localiza que comparte dos nodos en comun if( id.size()!=2 && (SearchInsideNode((*ipair)[0], (*ipair)[1], id[0]))==true) { temp.push_back(*(ipair)); } } else { temp.push_back(*(ipair)); } id.clear(); } PairContacts.swap(temp); KRATOS_CATCH("") } //***************************************************************************************************** //***************************************************************************************************** void FiltratePairContacts3D(ContainerContactPair& PairContacts) { KRATOS_TRY ContainerContactPair temp; std::vector<unsigned int> id; for(IteratorContainerContactPair ipair = PairContacts.begin(); ipair!=PairContacts.end(); ipair++) { if(SearchCommonNode( (*ipair)[0], (*ipair)[1], id )==false) { /* if( id.size()!=3){ for(unsigned int i = 0; i<id.size(); i++) if(SearchInsideNode((*ipair)[0], (*ipair)[1], id[i] )==true) { KRATOS_WATCH(id[i]) temp.push_back(*(ipair)); break; } } } else */ //{ temp.push_back(*(ipair)); //} id.clear(); } } PairContacts.swap(temp); KRATOS_CATCH("") } bool FiltratePairContacts(const PointerType& elem1, const PointerType& elem2) { KRATOS_TRY std::vector<unsigned int> id; if(mrdimension==2) { const bool test_1 = SearchCommonNode(elem1, elem2, id); if(test_1) { /// Se localiza que comparte dos nodos en comun if(id.size()==2) return false; else if(id.size()!=2 && (SearchInsideNode(elem1, elem2, id[0])==true)) return true; } else return true; } else { if(SearchCommonNode(elem1, elem2, id)) { return false; } else { return true; } } return false; KRATOS_CATCH("") } //***************************************************************************************************** //***************************************************************************************************** /// Se busca el nodo comun entre los contactos bool SearchCommonNode(const PointerType& elem1, const PointerType& elem2, std::vector<unsigned int>& id) { KRATOS_TRY Element::GeometryType& geom1 = (elem1)->GetGeometry(); 
Element::GeometryType& geom2 = (elem2)->GetGeometry(); /// buscando el nodo comun for(unsigned int i = 0; i<geom1.size(); i++) { for(unsigned int j = 0; j<geom1.size(); j++) { if(geom1[i].Id()==geom2[j].Id()) { id.push_back(geom1[i].Id()); } } } return id.size()!=0; //if( id.size()!=0) return true; //return false; KRATOS_CATCH("") } /// Verifica si los nodos que no es el comun cae dentro del elemento //***************************************************************************************************** //***************************************************************************************************** bool SearchInsideNode(const PointerType& elem1, const PointerType& elem2, const unsigned int& ide) { KRATOS_TRY Element::GeometryType& geom1 = (elem1)->GetGeometry(); Element::GeometryType& geom2 = (elem2)->GetGeometry(); array_1d<double, 3> result; ///CoordinatesArrayType result; /// buscando si uno de los nodos entra dentro del elemento for(unsigned int i = 0; i<geom1.size(); i++) { if(geom2[i].Id()!=ide) { if(geom1.IsInside(geom2[i], result)) { return true; } } } for(unsigned int i = 0; i<geom2.size(); i++) { if(geom1[i].Id()!=ide) { if(geom2.IsInside(geom1[i], result)) { return true; } } } return false; KRATOS_CATCH("") } //***************************************************************************************************** //***************************************************************************************************** void NodeInside(const PointerType& MasterObject, const PointerType& SlaveObject, std::vector<NodePointerType>& InsideNodes) { KRATOS_TRY Element::GeometryType& geom_master = MasterObject->GetGeometry(); Element::GeometryType& geom_slave = SlaveObject->GetGeometry(); std::vector<unsigned> Nodes; /// buscando el nodo comun bool commun = false; for(unsigned int i = 0; i<geom_slave.size(); i++) { commun = false; for(unsigned int j = 0; j<geom_master.size(); j++) { if(geom_slave[i].Id()==geom_master[j].Id()) { commun = true; } } 
        // (continuation of NodeInside) record indices of non-shared slave nodes
        if(commun==false)
            Nodes.push_back(i);
    }
    array_1d<double, 3> result;
    // keep the non-shared slave nodes that fall inside the master geometry
    for (unsigned int i = 0; i<Nodes.size(); i++ )
    {
        if(geom_master.IsInside(geom_slave[Nodes[i]], result))
        {
            InsideNodes.push_back(geom_slave(Nodes[i]));
        }
    }
    KRATOS_CATCH("")
}

// Enables/disables recomputation of the boundary contour on the next run.
void ResetFlagComputeBoundaryContour(const bool& rflag)
{
    mcompute_boundary_contour = rflag;
}

private:
ModelPart mr_model_part;              // NOTE(review): held by value, not by reference — confirm this copy is intended
unsigned int mrdimension;             // problem dimension (2 or 3)
double mpenalty_factor;
bool mcompute_boundary_contour;
NodesContainerType mBoundaryNodes;    // nodes on the mesh boundary
ContainerType mBoundaryElements;      // elements touching the boundary
ContainerContactPair mPairContacts;   // candidate contact element pairs
ConditionsArrayType mMasterConditionsArray;
//WeakPointerVector<NodeType> mBoundaryNodes;

//*****************************************************************************************************
//*****************************************************************************************************

/// WARNING = To be parallel
// For every boundary node, selects which master segment (the "right" or "left"
// condition around its nearest node) it should be linked to, based on the
// distance to each segment and on blended segment normals, and stores the
// chosen condition in CONTACT_LINK_MASTER.
void IdentifyMasterSegment2D()
{
    KRATOS_TRY
    std::cout<< " IDENTIFYING THE MASTER 2D SEGMENT " <<std::endl;
    NodesArrayType& pNodes = mr_model_part.Nodes();
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
#ifdef _OPENMP
    int number_of_threads = 1; //omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);
    //int I = 0;
    //double g = 0.00;
    //double g_old = 0.00;
    // double gl = 0.00;
    // double gr = 0.00;
    //double compare_distance = 0.00;
    double pr = 0.00;   // projection weight towards the right segment
    double pl = 0.00;   // projection weight towards the left segment
    double wr = 0.00;   // normalized right weight
    double wl = 0.00;   // normalized left weight
    array_1d<double, 3> CS = ZeroVector(3);
    array_1d<double, 3> CL = ZeroVector(3);
    array_1d<double, 3> CR = ZeroVector(3);
    array_1d<double, 3> Normal = ZeroVector(3);
    array_1d<double, 3> Normal_s = ZeroVector(3);
    array_1d<double, 3> Normal_r = ZeroVector(3);
    array_1d<double, 3> Normal_l = ZeroVector(3);
    array_1d<double, 3> GR = ZeroVector(3);
    array_1d<double, 3> GL = ZeroVector(3);
    double distance_r = 0.00;
    double distance_l = 0.00;
    double N = 0.00;
    array_1d<double, 3> e3;
    e3[0] = 0;
    e3[1] = 0;
    e3[2] = 1.00;
    Segment2D rSegment_r;
    Segment2D rSegment_l;
    //#pragma omp parallel for private(I, g, g_old, gl, gr, compare_distance, CS, CL, CR, Normal, rSegment)
    for(int k=0; k<number_of_threads; k++)
    {
        NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
        NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
        for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
        {
            if(i->GetValue(IS_BOUNDARY)==1)
            {
                // conditions around the nearest node (master side) and around this node (slave side)
                WeakPointerVector<Condition>& neighb_cond = ((i)->GetValue(NEAR_NODE))->GetValue(NEIGHBOUR_CONDITIONS);
                WeakPointerVector<Condition>& neighb_cond_slave = (i)->GetValue(NEIGHBOUR_CONDITIONS);
                if( neighb_cond.size()!=0 && neighb_cond_slave.size()!=0 )
                {
                    array_1d<int,2> RL;
                    //double& distance = i->GetValue(DISTANCE);
                    const unsigned int& ID = i->GetValue(NEAR_NODE)->Id();
                    const unsigned int& ID_slave = i->Id();
                    /// identify the right and left conditions on the master side
                    RL[0] = 0;
                    RL[1] = 0;
                    if(neighb_cond(0).lock()->GetGeometry()(0)->Id()==ID)
                        RL[0] = 1;
                    else
                        RL[1] = 1;
                    const Condition::Pointer& first = neighb_cond(RL[0]).lock();
                    const Condition::Pointer& last = neighb_cond(RL[1]).lock();
                    /// identify the right and left conditions on the slave side
                    RL[0] = 0;
                    RL[1] = 0;
                    if(neighb_cond_slave(0).lock()->GetGeometry()(0)->Id()==ID_slave)
                        RL[0] = 1;
                    else
                        RL[1] = 1;
                    const Condition::Pointer& first_s = neighb_cond_slave(RL[0]).lock();
                    const Condition::Pointer& last_s = neighb_cond_slave(RL[1]).lock();
                    Condition::GeometryType& geom_first = first->GetGeometry();
                    Condition::GeometryType& geom_last = last->GetGeometry();
                    array_1d<double, 3>& point_closest = i->GetValue(NEAR_NODE)->Coordinates();
                    array_1d<double, 3>& point_slave = i->Coordinates();
                    array_1d<double, 3>& point_left = geom_last.GetPoint(1);
                    array_1d<double, 3>& point_right = geom_first.GetPoint(0);
                    // geometric vectors between the slave node, the closest node and the segment end points
                    noalias(CS) = point_closest - point_slave;
                    noalias(CL) = point_left - point_closest;
                    noalias(CR) = point_closest - point_right;
                    noalias(GR) = point_slave - point_right;
                    noalias(GL) = point_slave - point_closest;
                    //KRATOS_WATCH("CCCCCCCCCCC")
                    //KRATOS_WATCH(i->Id())
                    //KRATOS_WATCH(neighb_cond_slave.size())
                    //KRATOS_WATCH(first_s->Id())
                    //KRATOS_WATCH(last_s->Id())
                    // averaged slave-side normal from the two slave conditions
                    first_s->Calculate(NORMAL, Normal_r, CurrentProcessInfo);
                    last_s->Calculate(NORMAL, Normal_l, CurrentProcessInfo);
                    //KRATOS_WATCH("AKIIIIIIIIIIIIIIIII")
                    noalias(Normal_s) = Normal_r + Normal_l;
                    Normal_s = (1.00/norm_2(Normal_s)) * Normal_s;
                    // master-side normals of the right/left segments
                    first->Calculate(NORMAL, Normal_r, CurrentProcessInfo);
                    last->Calculate(NORMAL, Normal_l, CurrentProcessInfo);
                    //const double& cs = norm_2(CS);
                    const double& cl = norm_2(CL);
                    const double& cr = norm_2(CR);
                    noalias(CL) = CL * (1.00/cl);
                    noalias(CR) = CR * (1.00/cr);
                    // gr = inner_prod(GR,Normal_r);
                    // gl = inner_prod(GL,Normal_l);
                    // projections of the gap vector onto the segment directions, clamped to >= 0
                    pr = -inner_prod(GL,CR);
                    if(pr<=0.00) pr = 0.00;
                    pl = inner_prod(GL,CL);
                    if(pl<=0.00) pl = 0.00;
                    if(std::fabs(pr-pl)<=1E-14)
                    {
                        pr = 1.00;
                        pl =1.00;
                    }
                    wr = pr/(pr + pl);   // blending weights
                    wl = 1.00 - wr;
                    N = norm_2( pr * Normal_r + pl * Normal_l );
                    noalias(Normal) = (1.00/N) * ( pr * Normal_r + pl * Normal_l);
                    // distance from the slave node to each candidate segment
                    rSegment_r.AssignPointsAndComputeParameters(point_right, point_closest);
                    rSegment_l.AssignPointsAndComputeParameters(point_closest, point_left);
                    distance_r = rSegment_r.DistPoint2Segment2D(point_slave);
                    distance_l = rSegment_l.DistPoint2Segment2D(point_slave);
                    /// choosing the best segment
                    const double& dot_l = inner_prod(Normal_s,Normal_l);
                    const double& dot_r = inner_prod(Normal_s,Normal_r);
                    // NOTE(review): the wr>wl and wr<wl branches below are identical — confirm this is intended
                    if(distance_r <= distance_l)
                    {
                        if(wr==wl) /// either segment is valid
                            i->GetValue(CONTACT_LINK_MASTER) = first;
                        else if(wr > wl) /// right segment
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                        else if(wr < wl) /// right segment
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                    }
                    if(distance_r > distance_l)
                    {
                        if(wr==wl) /// either segment is valid
                            i->GetValue(CONTACT_LINK_MASTER) = first;
                        else if(wr > wl) /// right segment
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                        else if(wr < wl) /// right segment
                        {
                            if(dot_l < dot_r)
                                i->GetValue(CONTACT_LINK_MASTER) = last;
                            else
                                i->GetValue(CONTACT_LINK_MASTER) = first;
                        }
                    }
                    // if( i->Id()==44) //|| i->Id()==68 || i->Id()==49) //|| i->Id()==189) //(i->Id()==926 || i->Id()==927 || i->Id()==910 || i->Id()==919 || i->Id()==905 || i->Id()==904 )
                    // {
                    // KRATOS_WATCH(i->Id())
                    // // KRATOS_WATCH(ID)
                    // // KRATOS_WATCH(first->Id())
                    // // KRATOS_WATCH(last->Id())
                    // // KRATOS_WATCH(pr)
                    // // KRATOS_WATCH(pl)
                    // // KRATOS_WATCH(wr)
                    // // KRATOS_WATCH(wl)
                    // // KRATOS_WATCH(gr)
                    // // KRATOS_WATCH(gl)
                    // // KRATOS_WATCH(distance_r)
                    // // KRATOS_WATCH(distance_l)
                    // // KRATOS_WATCH(Normal_r)
                    // // KRATOS_WATCH(Normal_l)
                    // // KRATOS_WATCH(Normal_s)
                    // // KRATOS_WATCH(dot_l)
                    // // KRATOS_WATCH(dot_r)
                    // KRATOS_WATCH(i->GetValue(NEAR_NODE)->Id());
                    // KRATOS_WATCH(i->GetValue(CONTACT_LINK_MASTER)->Id());
                    // // const int& id = i->GetValue(CONTACT_LINK_MASTER)->Id();
                    // // if(id==58) { KRATOS_WATCH(id); KRATOS_THROW_ERROR(std::logic_error, "" , "");}
                    // // if(id==46) { KRATOS_WATCH(id); KRATOS_THROW_ERROR(std::logic_error, "" , "");}
                    // KRATOS_WATCH("---------------------")
                    // }
                }
            }
        }
    }
    KRATOS_CATCH("")
}

//*****************************************************************************************************
//*****************************************************************************************************

// For each boundary node of the elements in [it_begin, it_end), finds the
// closest master condition among the neighbour conditions of the objects
// returned by the spatial bins and stores it in CONTACT_LINK_MASTER.
template<class TConfigure>
void LocalSearch2D( BinsObjectDynamic<TConfigure>& rBins, const IteratorType& it_begin, const IteratorType& it_end)
{
    std::cout<< " LOCAL SEARCH ALGORITHM " <<std::endl;
    unsigned int I = 0;
    double compare_distance = 0.00;
    ResultContainerType Result;
    array_1d<double, 3> Normal = ZeroVector(3);
    array_1d<double, 3> Mid_Point = ZeroVector(3);
    array_1d<double, 3> Vect = ZeroVector(3);
    Segment2D rSegment;
    for(IteratorType it = it_begin; it!=it_end; it++)
    {
        std::size_t size =
rBins.SearchObjects(*it, Result); if(size!=0) { Element::GeometryType& geom = (*it)->GetGeometry(); for(unsigned int i = 0; i<geom.size(); i++) { if(geom(i)->GetValue(IS_BOUNDARY) == 1) { array_1d<double, 3>& Points0 = geom.GetPoint(i); double& distance = geom(i)->GetValue(DISTANCE); for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++) { I = 0; WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS); if(neighb_cond.size()!=0) { for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++) { //if(geom(i)->Id()==3) // KRATOS_WATCH(neighb->Id()) if(neighb->GetValue(NODAL_VALUES) == 0) { neighb->GetValue(NODAL_VALUES) = 1; Condition::GeometryType& geom_2 = (neighb)->GetGeometry(); //(neighb)->Calculate(NORMAL, Normal, CurrentProcessInfo); if( (geom_2(0)->Id() != geom(i)->Id()) && (geom_2(1)->Id() != geom(i)->Id()) ) { array_1d<double, 3>& Points1 = geom_2.GetPoint(0); array_1d<double, 3>& Points2 = geom_2.GetPoint(1); rSegment.AssignPointsAndComputeParameters(Points1, Points2); compare_distance = rSegment.DistPoint2Segment2D(Points0); if(compare_distance<distance) { distance = compare_distance; geom(i)->GetValue(CONTACT_LINK_MASTER) = neighb_cond(I).lock(); } } I++; } } } } //if(geom(i)->Id()==3){ //KRATOS_WATCH(geom(i)->GetValue(CONTACT_LINK_MASTER)->Id()) //KRATOS_WATCH("--------------------------") //} /// Reseting the values for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++) { WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS); if(neighb_cond.size()!=0) for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++) neighb->GetValue(NODAL_VALUES) = 0; } } } } Result.clear(); } } //***************************************************************************************************** 
//*****************************************************************************************************

// For every boundary node, finds its nearest boundary node via a dynamic
// point-bins search and stores it in NEAR_NODE.
void SearchNearNode2D()
{
    KRATOS_TRY
    std::cout<< " SEARCHING NEAR NODE " <<std::endl;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    int distance = std::distance(mBoundaryNodes.begin(), mBoundaryNodes.end());
    CreatePartition(number_of_threads, distance, node_partition);
    BinsDynamic<2, NodeType, NodesContainerType> BinsPoint(mBoundaryNodes.begin(), mBoundaryNodes.end());
#pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        NodesIteratorType it_begin = mBoundaryNodes.begin() + node_partition[k];
        NodesIteratorType it_end = mBoundaryNodes.begin() + node_partition[k+1];
        for(NodesIteratorType inode = it_begin; inode!=it_end; inode++)
        {
            (*inode)->GetValue(NEAR_NODE) = BinsPoint.SearchNearestPoint(**inode);
            // NOTE(review): debug output inside an OpenMP loop — prints will interleave
            KRATOS_WATCH((*inode)->Id())
            KRATOS_WATCH((*inode)->GetValue(NEAR_NODE)->Id())
            KRATOS_WATCH("----------------------------")
        }
    }
    KRATOS_CATCH("")
}

//*****************************************************************************************************
//*****************************************************************************************************

// For each boundary node of the given elements, scans the neighbour conditions
// of nearby objects (from the spatial bins) and stores the closest foreign
// node in NEAR_NODE; finally resets every nodal DISTANCE back to DBL_MAX.
template<class TConfigure>
void SearchNearNode2D( BinsObjectDynamic<TConfigure>& rBins, const IteratorType& it_begin, const IteratorType& it_end)
{
    KRATOS_TRY
    std::cout<< " SEARCHING NEAR NODE 2D " <<std::endl;
    //ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    // unsigned int I = 0;
    double compare_distance = 0.00;
    ResultContainerType Result;
    array_1d<double, 3> Vect = ZeroVector(3);
    for(IteratorType it = it_begin; it!=it_end; it++)
    {
        Result.clear();
        rBins.SearchObjects(*it, Result);
        ///SearchAroundObjectsInner(*it, Result); keep this commented out for the 2D case
        if(Result.size()!=0)
        {
            Element::GeometryType& geom = (*it)->GetGeometry();
            for(unsigned int i = 0; i<geom.size(); i++)
            {
                if(geom(i)->GetValue(IS_BOUNDARY) == 1)
                {
                    array_1d<double, 3>& Points0 = geom.GetPoint(i);
                    double& distance = geom(i)->GetValue(DISTANCE);
                    for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                    {
                        // I = 0;
                        WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                        if(neighb_cond.size()!=0)
                        {
                            for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                            {
                                // NODAL_VALUES acts as a "visited" flag so each condition is checked once per node
                                if(neighb->GetValue(NODAL_VALUES) == 0)
                                {
                                    neighb->GetValue(NODAL_VALUES) = 1;
                                    Condition::GeometryType& geom_2 = (neighb)->GetGeometry();
                                    // skip conditions attached to this very node
                                    if( (geom_2(0)->Id() != geom(i)->Id()) && (geom_2(1)->Id() != geom(i)->Id()) )
                                    {
                                        /// searching for the closest node
                                        for(unsigned int k = 0; k<geom_2.size(); k++)
                                        {
                                            array_1d<double, 3>& Points1 = geom_2.GetPoint(k);
                                            noalias(Vect) = Points1 - Points0;
                                            compare_distance = norm_2(Vect);
                                            if(compare_distance<distance)
                                            {
                                                distance = compare_distance;
                                                geom(i)->GetValue(NEAR_NODE) = geom_2(k);
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                    /// Resetting the visited flags
                    for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                    {
                        WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                        if(neighb_cond.size()!=0)
                            for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                                neighb->GetValue(NODAL_VALUES) = 0;
                    }
                }
            }
        }
    }
    /// resetting the distance values of every node
    NodesArrayType& pNodes = mr_model_part.Nodes();
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> node_partition;
    CreatePartition(number_of_threads, pNodes.size(), node_partition);
#pragma omp parallel for
    for(int k=0; k<number_of_threads; k++)
    {
        NodesArrayType::iterator i_begin=pNodes.ptr_begin()+node_partition[k];
        NodesArrayType::iterator i_end=pNodes.ptr_begin()+node_partition[k+1];
        for(ModelPart::NodeIterator i=i_begin; i!= i_end; ++i)
        {
            i->GetValue(DISTANCE) = DBL_MAX;
        }
    }
    KRATOS_CATCH("")
}
//*****************************************************************************************************
//*****************************************************************************************************

// For every boundary node of the elements in [it_begin, it_end), finds the
// closest neighbouring triangular face among the conditions of nearby objects
// and stores it in CONTACT_LINK_MASTER. Parallelised over element chunks.
template<class TConfigure>
void LocalSearch3D( BinsObjectDynamic<TConfigure>& rBins, const IteratorType& it_begin, const IteratorType& it_end)
{
    KRATOS_TRY
#ifdef _OPENMP
    double start_prod = omp_get_wtime();
#endif
    unsigned int I = 0;
    double distance = 0.00;
    double compare_distance = 0.00;
    ResultContainerType Result;
    array_1d<double, 3> Points0;
    array_1d<double, 3> Points1;
    array_1d<double, 3> Points2;
    array_1d<double, 3> Points3;
    //array_1d<double, 3> Normal;
    Plane rPlane;
#ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
#else
    int number_of_threads = 1;
#endif
    vector<unsigned int> partition;
    int distance_2 = int(it_end-it_begin);
    CreatePartition(number_of_threads, distance_2, partition);
#pragma omp parallel for private(I, distance, compare_distance, Result, Points0, Points1, Points2, Points3)
    for(int k=0; k<number_of_threads; k++)
    {
        IteratorType it_begin_1 = it_begin + partition[k];
        IteratorType it_end_1 = it_begin + partition[k+1];
        for(IteratorType it =it_begin_1; it!=it_end_1; it++)
        //for(IteratorType it = it_begin; it!=it_end; it++)
        {
            rBins.SearchObjects(*it, Result);
            if(Result.size()!=0)
            {
                Element::GeometryType& geom = (*it)->GetGeometry();
                for(unsigned int i = 0; i<geom.size(); i++)
                {
                    // NODAL_VALUES marks nodes already processed (a node is shared by several elements)
                    if(geom[i].GetValue(NODAL_VALUES)==0)
                    {
                        if(geom[i].GetValue(IS_BOUNDARY) == 1)
                        {
                            geom[i].SetLock();
                            geom[i].GetValue(NODAL_VALUES) = 1;
                            geom[i].UnSetLock();
                            Points0[0] = geom[i].X();
                            Points0[1] = geom[i].Y();
                            Points0[2] = geom[i].Z();
                            distance = DBL_MAX;
                            for(ResultIteratorType rthis = Result.begin(); rthis!=Result.end(); rthis++)
                            {
                                I = 0;
                                //KRATOS_WATCH((*rthis)->Id())
                                WeakPointerVector<Condition>& neighb_cond = (*rthis)->GetValue(NEIGHBOUR_CONDITIONS);
                                if(neighb_cond.size()!=0)
                                {
                                    for(WeakPointerVector<Condition>::iterator neighb = neighb_cond.begin(); neighb!= neighb_cond.end(); neighb++)
                                    {
                                        Condition::GeometryType& geom_2 = (neighb)->GetGeometry();
                                        //KRATOS_WATCH((neighb)->Id())
                                        //KRATOS_WATCH(geom_2[0].Id())
                                        //KRATOS_WATCH(geom_2[1].Id())
                                        //KRATOS_WATCH(geom_2[2].Id())
                                        // skip faces containing this very node
                                        if((geom_2[0].Id() != geom[i].Id()) && (geom_2[1].Id() != geom[i].Id()) && (geom_2[2].Id()!= geom[i].Id()))
                                        {
                                            Points1[0] = geom_2[0].X();
                                            Points1[1] = geom_2[0].Y();
                                            Points1[2] = geom_2[0].Z();
                                            Points2[0] = geom_2[1].X();
                                            Points2[1] = geom_2[1].Y();
                                            Points2[2] = geom_2[1].Z();
                                            Points3[0] = geom_2[2].X();
                                            Points3[1] = geom_2[2].Y();
                                            Points3[2] = geom_2[2].Z();
                                            compare_distance = rPlane.DistPoint3Triangle3(Points0, Points1, Points2, Points3);
                                            //KRATOS_WATCH(compare_distance)
                                            if(compare_distance<distance)
                                            {
                                                distance = compare_distance;
                                                geom[i].SetLock();
                                                geom[i].GetValue(CONTACT_LINK_MASTER) = neighb_cond(I).lock();
                                                geom[i].UnSetLock();
                                            }
                                        }
                                        I++;
                                    }
                                }
                            }
                            //KRATOS_WATCH(geom[i].Id())
                            //KRATOS_WATCH(Result.size())
                            //KRATOS_WATCH(geom[i].GetValue(CONTACT_LINK_MASTER))
                            //KRATOS_WATCH("-----------------")
                        }
                    }
                }
            }
            Result.clear();
        }
    }
    //std::cout<< " LOCAL SEARCH ALGORITHM " <<std::endl;
#ifdef _OPENMP
    double stop_prod = omp_get_wtime();
    std::cout << " Time Searching Masters Surfaces = " << stop_prod - start_prod << " seconds " << std::endl;
#endif
    KRATOS_CATCH("")
}

// Splits number_of_rows into number_of_threads contiguous chunks;
// the last partition absorbs any remainder of the integer division.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + partition_size;
}

// Cross product: (x1,y1,z1) = (x2,y2,z2) x (x3,y3,z3)
inline void V3DCro( double& x1, double& y1, double& z1, const double& x2, const double& y2, const double& z2, const double& x3, const double& y3, const double& z3)
{
    x1 =(y2)*(z3)-(z2)*(y3);
    y1 =(z2)*(x3)-(x2)*(z3);
    z1 =(x2)*(y3)-(y2)*(x3);
}

// Normalises (x1,y1,z1) in place; s receives the original norm.
// The vector is left untouched when the norm is below EPSILON.
inline void V3DNor( double& s, double& x1, double& y1, double& z1)
{
    s= std::sqrt((x1)*(x1)+(y1)*(y1)+(z1)*(z1));
    if((s)>EPSILON)(x1)=(x1)/(s);
    if((s)>EPSILON)(y1)=(y1)/(s);
    if((s)>EPSILON)(z1)=(z1)/(s);
}

// Dot product: s = (x1,y1,z1) . (x2,y2,z2)
inline void V3DDot(double& s, const double& x1, const double& y1, const double& z1, const double& x2, const double& y2, const double& z2)
{
    s = ((x1)*(x2))+((y1)*(y2))+((z1)*(z2));
}
};
} // namespace Kratos.
#endif // KRATOS_GEOMETRY_UTILITIES_INCLUDED defined
GB_unaryop__lnot_int64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_int32
// op(A') function:  GB_tran__lnot_int64_int32

// C type:   int64_t
// A type:   int32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x)   \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !((int64_t) aij != 0) to every one of the anz entries,
// statically scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__lnot_int64_int32
(
    int64_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Delegates to the shared transpose template with the operator macros above.
GrB_Info GB_tran__lnot_int64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
MergeImages.h
/******************************************************************************
*       SOFA, Simulation Open-Framework Architecture, development version     *
*                (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH                    *
*                                                                             *
* This program is free software; you can redistribute it and/or modify it     *
* under the terms of the GNU Lesser General Public License as published by    *
* the Free Software Foundation; either version 2.1 of the License, or (at     *
* your option) any later version.                                             *
*                                                                             *
* This program is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or       *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details.                                                           *
*                                                                             *
* You should have received a copy of the GNU Lesser General Public License    *
* along with this program. If not, see <http://www.gnu.org/licenses/>.        *
*******************************************************************************
* Authors: The SOFA Team and external contributors (see Authors.txt)          *
*                                                                             *
* Contact information: contact@sofa-framework.org                             *
******************************************************************************/
#ifndef SOFA_IMAGE_MERGEIMAGES_H
#define SOFA_IMAGE_MERGEIMAGES_H

#include <image/config.h>
#include "ImageTypes.h"
#include <sofa/core/DataEngine.h>
#include <sofa/core/objectmodel/BaseObject.h>
#include <sofa/defaulttype/Vec.h>
#include <sofa/helper/rmath.h>
#include <sofa/helper/OptionsGroup.h>
#include <sofa/helper/vectorData.h>

#ifdef _OPENMP
#include <omp.h>
#endif

// overlap handling modes (indices into the "overlap" OptionsGroup)
#define AVERAGE 0
#define ORDER 1
#define ALPHABLEND 2
#define SEPARATE 3
#define ADDITIVE 4
#define INTERSECT 5

// interpolation modes (indices into the "interpolation" OptionsGroup)
#define INTERPOLATION_NEAREST 0
#define INTERPOLATION_LINEAR 1
#define INTERPOLATION_CUBIC 2

namespace sofa
{
namespace component
{
namespace engine
{

/**
 * This class merges images into one
 */
template <class _ImageTypes>
class MergeImages : public core::DataEngine
{
public:
    typedef core::DataEngine Inherited;
    SOFA_CLASS(SOFA_TEMPLATE(MergeImages,_ImageTypes),Inherited);

    typedef _ImageTypes ImageTypes;
    typedef typename ImageTypes::T T;
    typedef typename ImageTypes::imCoord imCoord;
    typedef helper::WriteOnlyAccessor<Data< ImageTypes > > waImage;
    typedef helper::ReadAccessor<Data< ImageTypes > > raImage;

    typedef SReal Real;
    typedef defaulttype::ImageLPTransform<Real> TransformType;
    typedef typename TransformType::Coord Coord;
    typedef helper::WriteOnlyAccessor<Data< TransformType > > waTransform;
    typedef helper::ReadAccessor<Data< TransformType > > raTransform;

    Data<helper::OptionsGroup> overlap;         // how overlapping regions are resolved (see #defines above)
    Data<helper::OptionsGroup> Interpolation;   // resampling method used when reading input images
    Data<unsigned int> nbImages;                // number of input image/transform pairs

    helper::vectorData<ImageTypes> inputImages;
    helper::vectorData<TransformType> inputTransforms;

    Data<ImageTypes> image;          // output: merged image
    Data<TransformType> transform;   // output: transform of the merged image

    virtual std::string getTemplateName() const    { return templateName(this);    }
    static std::string templateName(const MergeImages<ImageTypes>* = NULL) { return ImageTypes::Name(); }

    // Sets up the data fields, aliases and the option groups
    // (defaults: ALPHABLEND overlap, LINEAR interpolation).
    MergeImages()    :   Inherited()
        , overlap ( initData ( &overlap,"overlap","method for handling overlapping regions" ) )
        , Interpolation( initData ( &Interpolation,"interpolation","Interpolation method." ) )
        , nbImages ( initData ( &nbImages,(unsigned int)0,"nbImages","number of images to merge" ) )
        , inputImages(this, "image", "input image")
        , inputTransforms(this, "transform", "input transform")
        , image(initData(&image,ImageTypes(),"image","Image"))
        , transform(initData(&transform,TransformType(),"transform","Transform"))
    {
        inputImages.resize(nbImages.getValue());
        inputTransforms.resize(nbImages.getValue());
        image.setReadOnly(true);
        transform.setReadOnly(true);
        this->addAlias(&image, "outputImage");
        this->addAlias(&transform, "outputTransform");
        helper::OptionsGroup overlapOptions(6 ,"0 - Average pixels"
                ,"1 - Use image order as priority"
                ,"2 - Alpha blending according to distance from border"
                ,"3 - Take farthest pixel from border"
                ,"4 - Add pixels of each images"
                ,"5 - Set overlapping pixels of the first image to zero (only if the corresponding pixel in the other images different to zero)"
                                            );
        overlapOptions.setSelectedItem(ALPHABLEND);
        overlap.setValue(overlapOptions);
        helper::OptionsGroup InterpolationOptions(3,"Nearest", "Linear", "Cubic");
        InterpolationOptions.setSelectedItem(INTERPOLATION_LINEAR);
        Interpolation.setValue(InterpolationOptions);
    }

    virtual ~MergeImages()
    {
    }

    // Declares engine inputs/outputs and marks the engine dirty.
    virtual void init()
    {
        addInput(&nbImages);
        inputImages.resize(nbImages.getValue());
        inputTransforms.resize(nbImages.getValue());
        addOutput(&image);
        addOutput(&transform);
        setDirtyValue();
    }

    virtual void reinit()
    {
        inputImages.resize(nbImages.getValue());
        inputTransforms.resize(nbImages.getValue());
        update();
    }

    /// Parse the given description to assign values to this object's fields and potentially other parameters
    void parse ( sofa::core::objectmodel::BaseObjectDescription* arg )
    {
        inputImages.parseSizeData(arg, nbImages);
        inputTransforms.parseSizeData(arg, nbImages);
        Inherit1::parse(arg);
    }

    /// Assign the field values stored in the given map of name -> value pairs
    void parseFields ( const std::map<std::string,std::string*>& str )
    {
        inputImages.parseFieldsSizeData(str, nbImages);
        inputTransforms.parseFieldsSizeData(str, nbImages);
        Inherit1::parseFields(str);
    }

protected:

    struct pttype  // to handle overlaps, we need to record some values and positions for each image
    {
        helper::vector<helper::vector<double> > vals;   // vals[time][channel] sampled from one input image
        Coord u;                                        // per-axis distance from the image border
    };

    // Recomputes the merged output image and its transform from the current
    // inputs: the output grid is the axis-aligned bounding box of all inputs
    // at the smallest voxel size; each output voxel is filled by sampling the
    // inputs and resolving overlaps according to the selected overlap mode.
    virtual void update()
    {
        unsigned int nb = nbImages.getValue();
        inputImages.resize(nb);
        inputTransforms.resize(nb);
        if(!nb) return;

        defaulttype::Vec<2,Coord> BB = this->getBB(0);//bounding box of the output image
        Coord minScale;
        for (unsigned int j = 0 ; j < this->getScale(0).size(); j++)
            minScale[j] = fabs(this->getScale(0)[j]);
        for(unsigned int j=1; j<nb; j++)
        {
            defaulttype::Vec<2,Coord> bb = this->getBB(j);
            for(unsigned int k=0; k<bb[0].size(); k++)
            {
                //BB is axis-aligned
                if(BB[0][k]>bb[0][k]) BB[0][k]=bb[0][k];
                if(BB[1][k]<bb[1][k]) BB[1][k]=bb[1][k];
            }
            for(unsigned int k=0; k<3; k++)
                if( minScale[k] > fabs(this->getScale(j)[k]) )
                    minScale[k] = fabs(this->getScale(j)[k]);
        }

        // transform = translated version of inputTransforms[0] with minimum voxel size
        raTransform inT0(this->inputTransforms[0]);
        waTransform outT(this->transform);
        outT->operator=(inT0);
        outT->getRotation()=Coord(); //reset rotation because output image is axis aligned
        outT->getTranslation()=BB[0];
        outT->getScale()=minScale;
        outT->update(); //update internal quaternion depending on the rotation

        // set image
        raImage in0(this->inputImages[0]);
        if(in0->isEmpty()) return;
        imCoord dim=in0->getDimensions();
        dim[ImageTypes::DIMENSION_X]=fabs(BB[1][0] - BB[0][0]) / fabs(outT->getScale()[0]);
        dim[ImageTypes::DIMENSION_Y]=fabs(BB[1][1] - BB[0][1]) / fabs(outT->getScale()[1]);
        dim[ImageTypes::DIMENSION_Z]=fabs(BB[1][2] - BB[0][2]) / fabs(outT->getScale()[2]);
        waImage out(this->image);
        out->clear();
        out->setDimensions(dim);

        unsigned int overlp = this->overlap.getValue().getSelectedId();

        cimg_library::CImgList<T>& img = out->getCImgList();
#ifdef _OPENMP
#pragma omp parallel for
#endif
        cimg_forXYZ(img(0),x,y,z) //space
        {
            // zero-initialise this voxel across all time frames and channels
            for(unsigned int t=0; t<dim[4]; t++)
                for(unsigned int k=0; k<dim[3]; k++)
                    img(t)(x,y,z,k) = (T)0;

            Coord p = outT->fromImage(Coord(x,y,z)); //coordinate of voxel (x,y,z) in world space

            helper::vector<struct pttype> pts;
            for(unsigned int j=0; j<nb; j++) // store values at p from input images
            {
                raImage in(this->inputImages[j]);
                const cimg_library::CImgList<T>& inImg = in->getCImgList();
                const imCoord indim=in->getDimensions();
                raTransform inT(this->inputTransforms[j]);
                Coord inp=inT->toImage(p); //corresponding voxel in image j
                if(inp[0]>=0 && inp[1]>=0 && inp[2]>=0 && inp[0]<=indim[0]-1 && inp[1]<=indim[1]-1 && inp[2]<=indim[2]-1)
                {
                    struct pttype pt;
                    // sample all time frames / channels with the selected interpolation
                    if(Interpolation.getValue().getSelectedId()==INTERPOLATION_NEAREST)
                        for(unsigned int t=0; t<indim[4] && t<dim[4]; t++) // time
                        {
                            pt.vals.push_back(helper::vector<double>());
                            for(unsigned int k=0; k<indim[3] && k<dim[3]; k++)  // channels
                                pt.vals[t].push_back((double)inImg(t).atXYZ(sofa::helper::round((double)inp[0]),sofa::helper::round((double)inp[1]),sofa::helper::round((double)inp[2]),k));
                        }
                    else if(Interpolation.getValue().getSelectedId()==INTERPOLATION_LINEAR)
                        for(unsigned int t=0; t<indim[4] && t<dim[4]; t++) // time
                        {
                            pt.vals.push_back(helper::vector<double>());
                            for(unsigned int k=0; k<indim[3] && k<dim[3]; k++)  // channels
                                pt.vals[t].push_back((double)inImg(t).linear_atXYZ(inp[0],inp[1],inp[2],k));
                        }
                    else
                        for(unsigned int t=0; t<indim[4] && t<dim[4]; t++) // time
                        {
                            pt.vals.push_back(helper::vector<double>());
                            for(unsigned int k=0; k<indim[3] && k<dim[3]; k++)  // channels
                                pt.vals[t].push_back((double)inImg(t).cubic_atXYZ(inp[0],inp[1],inp[2],k));
                        }
                    pt.u=Coord( ( inp[0]< indim[0]-inp[0]-1)? inp[0]: indim[0]-inp[0]-1 ,
                            ( inp[1]< indim[1]-inp[1]-1)? inp[1]: indim[1]-inp[1]-1 ,
                            ( inp[2]< indim[2]-inp[2]-1)? inp[2]: indim[2]-inp[2]-1 ); // distance from border
                    // discard fully-zero samples
                    bool isnotnull=false;
                    for(unsigned int t=0; t<pt.vals.size(); t++)
                        for(unsigned int k=0; k<pt.vals[t].size(); k++)
                            if(pt.vals[t][k]!=(T)0)
                                isnotnull=true;
                    if(isnotnull)
                        pts.push_back(pt);
                }
            }
            unsigned int nbp=pts.size();
            if(nbp==0)
                continue;
            else if(nbp==1) // only one image covers this voxel: copy it
            {
                for(unsigned int t=0; t<pts[0].vals.size(); t++)
                    for(unsigned int k=0; k<pts[0].vals[t].size(); k++)
                        if((T)pts[0].vals[t][k]!=(T)0)
                            img(t)(x,y,z,k) = (T)pts[0].vals[t][k];
            }
            else if(nbp>1) // overlap: resolve according to the selected mode
            {
                unsigned int nbt=pts[0].vals.size();
                unsigned int nbc=pts[0].vals[0].size();
                if(overlp==AVERAGE)
                {
                    for(unsigned int j=1; j<nbp; j++)
                        for(unsigned int t=0; t<nbt; t++)
                            for(unsigned int k=0; k<nbc; k++)
                                pts[0].vals[t][k] += pts[j].vals[t][k];
                    for(unsigned int t=0; t<nbt; t++)
                        for(unsigned int k=0; k<nbc; k++)
                            img(t)(x,y,z,k) = (T)(pts[0].vals[t][k]/(double)nbp);
                }
                else if(overlp==ORDER) // earlier images take priority over later ones
                {
                    for(int j=nbp-1; j>=0; j--)
                        for(unsigned int t=0; t<nbt; t++)
                            for(unsigned int k=0; k<nbc; k++)
                                if((T)pts[j].vals[t][k]!=(T)0)
                                    img(t)(x,y,z,k) = (T)pts[j].vals[t][k];
                }
                else if(overlp==ALPHABLEND) // weight by distance from each image's border
                {
                    unsigned int dir=0;
                    if(pts[1].u[1]!=pts[0].u[1]) dir=1;
                    if(pts[1].u[2]!=pts[0].u[2]) dir=2;  // blending direction = direction where distance to border is different
                    double count=pts[0].u[dir];
                    for(unsigned int t=0; t<nbt; t++)
                        for(unsigned int k=0; k<nbc; k++)
                            pts[0].vals[t][k]*=pts[0].u[dir];
                    for(unsigned int j=1; j<nbp; j++)
                    {
                        count+=pts[j].u[dir];
                        for(unsigned int t=0; t<nbt; t++)
                            for(unsigned int k=0; k<nbc; k++)
                                pts[0].vals[t][k] += pts[j].vals[t][k]*pts[j].u[dir];
                    }
                    for(unsigned int t=0; t<nbt; t++)
                        for(unsigned int k=0; k<nbc; k++)
                            img(t)(x,y,z,k) = (T)(pts[0].vals[t][k]/count);
                }
                else if(overlp==SEPARATE) // keep the sample farthest from its image border
                {
                    for(unsigned int j=1; j<nbp; j++)
                        if(pts[j].u[0]>pts[0].u[0] || pts[j].u[1]>pts[0].u[1] || pts[j].u[2]>pts[0].u[2])
                        {
                            pts[0].u= pts[j].u;
                            for(unsigned int t=0; t<nbt; t++)
                                for(unsigned int k=0; k<nbc; k++)
                                    pts[0].vals[t][k] = pts[j].vals[t][k];
                        }
                    for(unsigned int t=0; t<nbt; t++)
                        for(unsigned int k=0; k<nbc; k++)
                            img(t)(x,y,z,k) = (T)pts[0].vals[t][k];
                }
                else if(overlp==ADDITIVE)
                {
                    for(unsigned int j=1; j<nbp; j++)
                        for(unsigned int t=0; t<nbt; t++)
                            for(unsigned int k=0; k<nbc; k++)
                                pts[0].vals[t][k] += pts[j].vals[t][k];
                    for(unsigned int t=0; t<nbt; t++)
                        for(unsigned int k=0; k<nbc; k++)
                            img(t)(x,y,z,k) = (T)(pts[0].vals[t][k]);
                }
                else if(overlp==INTERSECT) // zero out first image's pixels where any other image is non-zero
                {
                    for(unsigned int j=1; j<nbp; j++)
                        for(unsigned int t=0; t<nbt; t++)
                            for(unsigned int k=0; k<nbc; k++)
                                if (pts[0].vals[t][k] && pts[j].vals[t][k])
                                    pts[0].vals[t][k] = (T)0.0;
                    for(unsigned int t=0; t<nbt; t++)
                        for(unsigned int k=0; k<nbc; k++)
                            img(t)(x,y,z,k) = (T)(pts[0].vals[t][k]);
                }
            }
        }

        sout << "Created merged image from " << nb << " input images." << sendl;

        cleanDirty();
    }

    // Returns the world-space axis-aligned bounding box (min, max corners)
    // of input image i, obtained by transforming its eight voxel corners.
    defaulttype::Vec<2,Coord> getBB(unsigned int i) // get image corners
    {
        defaulttype::Vec<2,Coord> BB;
        raImage rimage(this->inputImages[i]);
        raTransform rtransform(this->inputTransforms[i]);

        const imCoord dim= rimage->getDimensions();
        defaulttype::Vec<8,Coord> p;
        p[0]=defaulttype::Vector3(0,0,0);
        p[1]=defaulttype::Vector3(dim[0]-1,0,0);
        p[2]=defaulttype::Vector3(0,dim[1]-1,0);
        p[3]=defaulttype::Vector3(dim[0]-1,dim[1]-1,0);
        p[4]=defaulttype::Vector3(0,0,dim[2]-1);
        p[5]=defaulttype::Vector3(dim[0]-1,0,dim[2]-1);
        p[6]=defaulttype::Vector3(0,dim[1]-1,dim[2]-1);
        p[7]=defaulttype::Vector3(dim[0]-1,dim[1]-1,dim[2]-1);

        Coord tp=rtransform->fromImage(p[0]);
        BB[0]=tp;
        BB[1]=tp;
        for(unsigned int j=1; j<8; j++)
        {
            tp=rtransform->fromImage(p[j]);
            for(unsigned int k=0; k<tp.size(); k++)
            {
                if(BB[0][k]>tp[k]) BB[0][k]=tp[k];
                if(BB[1][k]<tp[k]) BB[1][k]=tp[k];
            }
        }
        return BB;
    }

    // Returns the voxel scale of input transform i.
    Coord getScale(unsigned int i)
    {
        Coord scale;
        raTransform rtransform(this->inputTransforms[i]);
        scale=rtransform->getScale();
        return scale;
    }

};

} // namespace engine
} // namespace component
} // namespace sofa

#endif // SOFA_IMAGE_MergeImages_H
GB_binop__islt_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function body below is expanded from a shared template
// (the #include'd *_template.c files), specialized for ISLT on uint16_t via
// the GB_* macros defined here.  Fix defects in Generator/*, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__islt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint16)
// A*D function (colscale): GB (_AxD__islt_uint16)
// D*A function (rowscale): GB (_DxB__islt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint16)
// C=scalar+B GB (_bind1st__islt_uint16)
// C=scalar+B' GB (_bind1st_tran__islt_uint16)
// C=A+scalar GB (_bind2nd__islt_uint16)
// C=A'+scalar GB (_bind2nd_tran__islt_uint16)

// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0

// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after the 0 continues the macro onto
// the following blank line; harmless, but generated that way.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// NOTE(review): same stray trailing backslash as GB_A_IS_PATTERN; harmless.
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_UINT16 || GxB_NO_ISLT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// (ISLT is not in that list, so this variant is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion substitutes alpha/beta for entries missing in A/B
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__islt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x < aij) ; \
}

GrB_Info GB (_bind1st_tran__islt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij < y) ; \
}

GrB_Info GB (_bind2nd_tran__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
8020.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(1) { #pragma omp for schedule(static, 16) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp for schedule(static, 16) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) data[i][j] -= mean[j]; /* Calculate the m * m covariance matrix. 
*/ #pragma omp for schedule(static, 16) for (j1 = 0; j1 < _PB_M; j1++) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
stencil.c
/* Copyright (c) 2013, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /******************************************************************* NAME: Stencil PURPOSE: This program tests the efficiency with which a space-invariant, linear, symmetric filter (stencil) can be applied to a square grid or image. USAGE: The program takes as input the number of threads, the linear dimension of the grid, and the number of iterations on the grid <progname> <# threads> <iterations> <grid size> The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. 
FUNCTIONS CALLED:

         Other than OpenMP or standard C functions, the following
         functions are used in this program:

         wtime()
         bail_out()

HISTORY: - Written by Rob Van der Wijngaart, November 2006.
         - RvdW: Removed unrolling pragmas for clarity; added constant to
           array "in" at end of each iteration to force refreshing of
           neighbor data in parallel versions; August 2013

*******************************************************************/

#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>

#ifndef RADIUS
#define RADIUS 2
#endif

#ifdef DOUBLE
#define DTYPE   double
#define EPSILON 1.e-8
#define COEFX   1.0
#define COEFY   1.0
#define FSTR    "%lf"
#else
#define DTYPE   float
#define EPSILON 0.0001f
#define COEFX   1.0f
#define COEFY   1.0f
#define FSTR    "%f"
#endif

/* define shorthand for indexing a multi-dimensional array */
#define IN(i,j)       in[i+(j)*(n)]
#define OUT(i,j)      out[i+(j)*(n)]
#define WEIGHT(ii,jj) weight[ii+RADIUS][jj+RADIUS]

int main(int argc, char ** argv) {

  int    n;               /* linear grid dimension */
  int    i, j, ii, jj, it, jt, iter;  /* dummies */
  DTYPE  norm,            /* L1 norm of solution */
         reference_norm;
  DTYPE  f_active_points; /* interior of grid with respect to stencil */
  DTYPE  flops;           /* floating point ops per iteration */
  int    iterations;      /* number of times to run the algorithm */
  double stencil_time,    /* timing parameters */
         avgtime = 0.0, maxtime = 0.0, mintime = 366.0*24.0*3600.0;
                          /* set the minimum time to a large value; one leap
                             year should be enough */
  int    stencil_size;    /* number of points in stencil */
  int    tile_size;       /* grid block factor */
  int    nthread_input,   /* thread parameters */
         nthread;
  DTYPE  * RESTRICT in;   /* input grid values */
  DTYPE  * RESTRICT out;  /* output grid values */
  int    total_length;    /* total required length to store grid values */
  int    num_error=0;     /* flag that signals that requested and obtained
                             numbers of threads are the same */
  DTYPE  weight[2*RADIUS+1][2*RADIUS+1]; /* weights of points in the stencil */

  /*******************************************************************************
  ** process and test input parameters
  ********************************************************************************/

  if (argc != 4 && argc != 5){
    printf("Usage: %s <# threads> <# iterations> <array dimension> <tile size>\n",
           *argv);
    return(EXIT_FAILURE);
  }

  /* Take number of threads to request from command line */
  nthread_input = atoi(*++argv);

  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }

  omp_set_num_threads(nthread_input);

  iterations = atoi(*++argv);
  if (iterations < 1){
    printf("ERROR: iterations must be >= 1 : %d \n",iterations);
    exit(EXIT_FAILURE);
  }

  n = atoi(*++argv);
  if (n < 1){
    printf("ERROR: grid dimension must be positive: %d\n", n);
    exit(EXIT_FAILURE);
  }

  if (RADIUS < 1) {
    printf("ERROR: Stencil radius %d should be positive\n", RADIUS);
    exit(EXIT_FAILURE);
  }

  if (2*RADIUS +1 > n) {
    printf("ERROR: Stencil radius %d exceeds grid size %d\n", RADIUS, n);
    exit(EXIT_FAILURE);
  }

  /* make sure the vector space can be represented */
  /* total_length is an int; the division check below detects whether
     n*n*sizeof(DTYPE) overflowed during the multiplication */
  total_length = n*n*sizeof(DTYPE);
  if (total_length/n != n*sizeof(DTYPE)) {
    printf("ERROR: Space for %d x %d grid cannot be represented; ", n, n);
    exit(EXIT_FAILURE);
  }

  if (argc == 5) {
    tile_size = atoi(*++argv);
    if (tile_size < 1) {
      printf("ERROR: tile size must be positive : %d\n", tile_size);
      exit(EXIT_FAILURE);
    }
  }
  else tile_size = n;  /* tile as large as the grid => untiled loops below */

  in  = (DTYPE *) malloc(total_length);
  out = (DTYPE *) malloc(total_length);
  if (!in || !out) {
    printf("ERROR: could not allocate space for input or output array\n");
    exit(EXIT_FAILURE);
  }

  /* fill the stencil weights to reflect a discrete divergence operator */
  for (jj=-RADIUS; jj<=RADIUS; jj++) for (ii=-RADIUS; ii<=RADIUS; ii++)
    WEIGHT(ii,jj) = (DTYPE) 0.0;
#ifdef STAR
  /* star stencil: nonzero weights only on the two axes */
  stencil_size = 4*RADIUS+1;
  for (ii=1; ii<=RADIUS; ii++) {
    WEIGHT(0, ii) = WEIGHT( ii,0) =  (DTYPE) (1.0/(2.0*ii*RADIUS));
    WEIGHT(0,-ii) = WEIGHT(-ii,0) = -(DTYPE) (1.0/(2.0*ii*RADIUS));
  }
#else
  /* compact stencil: nonzero weights on the full (2R+1)^2 square */
  stencil_size = (2*RADIUS+1)*(2*RADIUS+1);
  for (jj=1; jj<=RADIUS; jj++) {
    for (ii=-jj+1; ii<jj; ii++) {
      WEIGHT(ii,jj)  =  (DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
      WEIGHT(ii,-jj) = -(DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
      WEIGHT(jj,ii)  =  (DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
      WEIGHT(-jj,ii) = -(DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS));
    }
    WEIGHT(jj,jj)   =  (DTYPE) (1.0/(4.0*jj*RADIUS));
    WEIGHT(-jj,-jj) = -(DTYPE) (1.0/(4.0*jj*RADIUS));
  }
#endif

  norm = (DTYPE) 0.0;
  f_active_points = (DTYPE) (n-2*RADIUS)*(DTYPE) (n-2*RADIUS);

  #pragma omp parallel private(i, j, ii, jj, it, jt, iter)
  {

  #pragma omp master
  {
  nthread = omp_get_num_threads();
  printf("OpenMP stencil execution on 2D grid\n");
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads = %d\n",nthread_input);
    printf("Grid size = %d\n", n);
    printf("Radius of stencil = %d\n", RADIUS);
    if (tile_size <n-2*RADIUS) printf("Tile size = %d\n", tile_size);
    else                       printf("Grid not tiled\n");
#ifdef STAR
    printf("Type of stencil = star\n");
#else
    printf("Type of stencil = compact\n");
#endif
#ifdef DOUBLE
    printf("Data type = double precision\n");
#else
    printf("Data type = single precision\n");
#endif
    printf("Number of iterations = %d\n", iterations);
  }
  }
  bail_out(num_error);

  /* intialize the input and output arrays */
  #pragma omp for
  for (j=0; j<n; j++) for (i=0; i<n; i++)
    IN(i,j) = COEFX*i+COEFY*j;
  #pragma omp for
  for (j=RADIUS; j<n-RADIUS; j++) for (i=RADIUS; i<n-RADIUS; i++)
    OUT(i,j) = (DTYPE)0.0;

  for (iter = 0; iter<iterations; iter++){

    /* barrier so no thread starts the timed region before the timestamp */
    #pragma omp barrier
    #pragma omp master
    {
    stencil_time = wtime();
    }

    /* Apply the stencil operator; only use tiling if the tile size is smaller
       than the iterior part of the grid */
    if (tile_size < n-2*RADIUS) {
      #pragma omp for
      for (j=RADIUS; j<n-RADIUS; j+=tile_size) {
        for (i=RADIUS; i<n-RADIUS; i+=tile_size) {
          for (jt=j; jt<MIN(n-RADIUS,j+tile_size); jt++) {
            for (it=i; it<MIN(n-RADIUS,i+tile_size); it++) {
#ifdef STAR
              for (jj=-RADIUS; jj<=RADIUS; jj++)
                OUT(it,jt) += WEIGHT(0,jj)*IN(it,jt+jj);
              for (ii=-RADIUS; ii<0; ii++)
                OUT(it,jt) += WEIGHT(ii,0)*IN(it+ii,jt);
              for (ii=1; ii<=RADIUS; ii++)
                OUT(it,jt) += WEIGHT(ii,0)*IN(it+ii,jt);
#else
              /* would like to be able to unroll this loop, but compiler will ignore */
              for (jj=-RADIUS; jj<=RADIUS; jj++)
                for (ii=-RADIUS; ii<=RADIUS; ii++)
                  OUT(it,jt) += WEIGHT(ii,jj)*IN(it+ii,jt+jj);
#endif
            }
          }
        }
      }
    }
    else {
      #pragma omp for
      for (j=RADIUS; j<n-RADIUS; j++) {
        for (i=RADIUS; i<n-RADIUS; i++) {
#ifdef STAR
          for (jj=-RADIUS; jj<=RADIUS; jj++)
            OUT(i,j) += WEIGHT(0,jj)*IN(i,j+jj);
          for (ii=-RADIUS; ii<0; ii++)
            OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);
          for (ii=1; ii<=RADIUS; ii++)
            OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j);
#else
          /* would like to be able to unroll this loop, but compiler will ignore */
          for (jj=-RADIUS; jj<=RADIUS; jj++)
            for (ii=-RADIUS; ii<=RADIUS; ii++)
              OUT(i,j) += WEIGHT(ii,jj)*IN(i+ii,j+jj);
#endif
        }
      }
    }

    #pragma omp master
    {
    stencil_time = wtime() - stencil_time;
    if (iter>0 || iterations==1) { /* skip the first iteration */
      avgtime = avgtime + stencil_time;
      mintime = MIN(mintime, stencil_time);
      maxtime = MAX(maxtime, stencil_time);
    }
    }

    /* add constant to solution to force refresh of neighbor data, if any */
    #pragma omp for
    for (j=0; j<n; j++) for (i=0; i<n; i++) IN(i,j)+= 1.0;

  }

  /* compute L1 norm in parallel */
  #pragma omp for reduction(+:norm)
  for (j=RADIUS; j<n-RADIUS; j++) for (i=RADIUS; i<n-RADIUS; i++) {
    norm += (DTYPE)ABS(OUT(i,j));
  }

  } /* end of OPENMP parallel region */

  norm /= f_active_points;

  /*******************************************************************************
  ** Analyze and output results.
  ********************************************************************************/

  /* verify correctness */
  reference_norm = (DTYPE) iterations * (COEFX + COEFY);
  if (ABS(norm-reference_norm) > EPSILON) {
    printf("ERROR: L1 norm = "FSTR", Reference L1 norm = "FSTR"\n",
           norm, reference_norm);
    exit(EXIT_FAILURE);
  }
  else {
    printf("Solution validates\n");
#ifdef VERBOSE
    printf("Reference L1 norm = "FSTR", L1 norm = "FSTR"\n",
           reference_norm, norm);
#endif
  }

  flops = (DTYPE) (2*stencil_size-1) * f_active_points;
  avgtime = avgtime/(double)(MAX(iterations-1,1));
  printf("Rate (MFlops/s): "FSTR", Avg time (s): %lf, Min time (s): %lf",
         1.0E-06 * flops/mintime, avgtime, mintime);
  printf(", Max time (s): %lf\n", maxtime);

  exit(EXIT_SUCCESS);
}
multiply.h
#pragma once
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <omp.h>
#include "_cuda.h"

using std::vector;
using std::unordered_map;
using std::max;




// In-place scalar multiply: x[i] *= v for the first N elements.
template <class T>
void multiply(T *x, int N, T v) {
  for (int i=0; i<N; i++)
    x[i] *= v;
}

// Vector convenience wrapper (size() is narrowed to int for N).
template <class T>
void multiply(vector<T>& x, T v) {
  multiply(x.data(), x.size(), v);
}

// Scales every mapped value in place; keys are untouched.
template <class K, class T>
void multiply(unordered_map<K, T>& x, T v) {
  for (auto& p : x)
    p.second *= v;
}




// Elementwise product: a[i] = x[i] * y[i] for the first N elements.
template <class T>
void multiply(T *a, T *x, T *y, int N) {
  for (int i=0; i<N; i++)
    a[i] = x[i] * y[i];
}

// Vector convenience wrapper; iterates over a.size() elements.
template <class T>
void multiply(vector<T>& a, vector<T>& x, vector<T>& y) {
  multiply(a.data(), x.data(), y.data(), a.size());
}

// Map variant keyed by x's entries.  NOTE(review): operator[] will
// default-construct a value for a key missing from y (or a) -- assumes
// x and y share the same key set; confirm with callers.
template <class K, class T>
void multiply(unordered_map<K, T>& a, unordered_map<K, T>& x, unordered_map<K, T>& y) {
  for (auto& p : x)
    a[p.first] = x[p.first] * y[p.first];
}




// Scales only the entries whose indices are listed in "is".
template <class T, class C>
void multiplyAt(T *x, C&& is, T v) {
  for (int i : is)
    x[i] *= v;
}

template <class T, class C>
void multiplyAt(vector<T>& x, C&& is, T v) {
  multiplyAt(x.data(), is, v);
}

// Scales only the mapped values whose keys are listed in "ks";
// operator[] inserts a default-constructed value for an absent key.
template <class K, class T, class C>
void multiplyAt(unordered_map<K, T>& x, C&& ks, T v) {
  for (auto&& k : ks)
    x[k] *= v;
}




// OpenMP variant of the in-place scalar multiply.
template <class T>
void multiplyOmp(T *x, int N, T v) {
  #pragma omp parallel for
  for (int i=0; i<N; i++)
    x[i] *= v;
}

template <class T>
void multiplyOmp(vector<T>& x, T v) {
  multiplyOmp(x.data(), x.size(), v);
}




// Strided device loop: thread starting at i handles i, i+DI, i+2*DI, ...
template <class T>
__device__ void multiplyKernelLoop(T *a, int N, T v, int i, int DI) {
  for (; i<N; i+=DI)
    a[i] *= v;
}

template <class T>
__global__ void multiplyKernel(T *a, int N, T v) {
  DEFINE(t, b, B, G);
  multiplyKernelLoop(a, N, v, B*b+t, G*B);
}

// Copies a to the device, scales it there, and copies it back.
template <class T>
void multiplyCuda(T *a, int N, T v) {
  int threads = _THREADS;
  int blocks  = min(ceilDiv(N, threads), _BLOCKS);
  size_t A1 = N * sizeof(T);

  T *aD;
  TRY( cudaMalloc(&aD, A1) );
  TRY( cudaMemcpy(aD, a, A1, cudaMemcpyHostToDevice) );
  multiplyKernel<<<blocks, threads>>>(aD, N, v);
  TRY( cudaMemcpy(a, aD, A1, cudaMemcpyDeviceToHost) );
  TRY( cudaFree(aD) );
}

template <class T>
void multiplyCuda(vector<T>& x, T v) {
  multiplyCuda(x.data(), x.size(), v);
}




template <class T>
__device__ void multiplyKernelLoop(T *a, T *x, T *y, int N, int i, int DI) {
  for (; i<N; i+=DI)
    a[i] = x[i] * y[i];
}

template <class T>
__global__ void multiplyKernel(T *a, T *x, T* y, int N) {
  DEFINE(t, b, B, G);
  multiplyKernelLoop(a, x, y, N, B*b+t, G*B);
}

// Elementwise product on the device: a = x .* y (host arrays).
// xD doubles as the output buffer, so only two device allocations are
// needed; x's device copy is overwritten before the copy-back into a.
// FIX: "v" is unused but used to be a required parameter, which made
// the 4-argument call in the vector overload below fail to compile; it
// now defaults to T(), so both 4- and 5-argument callers work.
template <class T>
void multiplyCuda(T *a, T *x, T *y, int N, T v=T()) {
  int threads = _THREADS;
  int blocks  = min(ceilDiv(N, threads), _BLOCKS);
  size_t A1 = N * sizeof(T);

  T *xD, *yD;
  TRY( cudaMalloc(&xD, A1) );
  TRY( cudaMalloc(&yD, A1) );
  TRY( cudaMemcpy(xD, x, A1, cudaMemcpyHostToDevice) );
  TRY( cudaMemcpy(yD, y, A1, cudaMemcpyHostToDevice) );
  multiplyKernel<<<blocks, threads>>>(xD, xD, yD, N);
  TRY( cudaMemcpy(a, xD, A1, cudaMemcpyDeviceToHost) );
  TRY( cudaFree(xD) );
  TRY( cudaFree(yD) );
}

template <class T>
void multiplyCuda(vector<T>& a, vector<T>& x, vector<T> &y) {
  multiplyCuda(a.data(), x.data(), y.data(), a.size());
}
mbpush2.c
/* C Library for Skeleton 2-1/2D Electromagnetic OpenMP PIC Code */ /* written by Viktor K. Decyk, UCLA */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include <math.h> #include "mbpush2.h" /*--------------------------------------------------------------------*/ double ranorm() { /* this program calculates a random number y from a gaussian distribution with zero mean and unit variance, according to the method of mueller and box: y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1)) y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)), where x is a random number uniformly distributed on (0,1). written for the ibm by viktor k. decyk, ucla local data */ static int r1 = 885098780, r2 = 1824280461; static int r4 = 1396483093, r5 = 55318673; static int iflg = 0; static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0; static double r0 = 0.0; int isc, i1; double ranorm, r3, asc, bsc, temp; if (iflg==1) { ranorm = r0; r0 = 0.0; iflg = 0; return ranorm; } isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r1 - (r1/isc)*isc; r3 = h1l*(double) r1 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r2/isc; isc = r2 - i1*isc; r0 = h1l*(double) r2 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r2 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r1 = r3 - ((double) isc)*bsc; temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc)); isc = 65536; asc = (double) isc; bsc = asc*asc; i1 = r4 - (r4/isc)*isc; r3 = h2l*(double) r4 + asc*h1u*(double) i1; i1 = r3/bsc; r3 -= ((double) i1)*bsc; bsc = 0.5*bsc; i1 = r5/isc; isc = r5 - i1*isc; r0 = h2l*(double) r5 + asc*h1u*(double) isc; asc = 1.0/bsc; isc = r0*asc; r5 = r0 - ((double) isc)*bsc; r3 += (double) isc + 2.0*h1u*(double) i1; isc = r3*asc; r4 = r3 - ((double) isc)*bsc; r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc); ranorm = temp*sin(r0); r0 = temp*cos(r0); iflg = 1; return ranorm; } 
/*--------------------------------------------------------------------*/
void cdistr2h(float part[], float vtx, float vty, float vtz, float vdx,
              float vdy, float vdz, int npx, int npy, int idimp, int nop,
              int nx, int ny, int ipbc) {
/* for 2-1/2d code, this subroutine calculates initial particle
   co-ordinates and velocities with uniform density and maxwellian
   velocity with drift
   part[n][0] = position x of particle n
   part[n][1] = position y of particle n
   part[n][2] = velocity vx of particle n
   part[n][3] = velocity vy of particle n
   part[n][4] = velocity vz of particle n
   vtx/vty/vtz = thermal velocity of electrons in x/y/z direction
   vdx/vdy/vdz = drift velocity of beam electrons in x/y/z direction
   npx/npy = initial number of particles distributed in x/y direction
   idimp = size of phase space = 5
   nop = number of particles
   nx/ny = system length in x/y direction
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   ranorm = gaussian random number with zero mean and unit variance
   NOTE(review): ranorm() keeps static state, so this initializer is
   not thread-safe and must be called serially.
local data */
   int j, k, k1, npxy;
   float edgelx, edgely, at1, at2, at3, sum1, sum2, sum3;
   double dsum1, dsum2, dsum3;
   npxy = npx*npy;
/* set boundary values: reflecting walls shrink the usable box by one
   grid cell on each side in the reflecting direction(s) */
   edgelx = 0.0;
   edgely = 0.0;
   at1 = (float) nx/(float) npx;
   at2 = (float) ny/(float) npy;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      at1 = (float) (nx-2)/(float) npx;
      at2 = (float) (ny-2)/(float) npy;
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      edgely = 0.0;
      at1 = (float) (nx-2)/(float) npx;
      at2 = (float) ny/(float) npy;
   }
/* uniform density profile: particles placed at cell-center offsets */
   for (k = 0; k < npy; k++) {
      k1 = idimp*npx*k;
      at3 = edgely + at2*(((float) k) + 0.5);
      for (j = 0; j < npx; j++) {
         part[idimp*j+k1] = edgelx + at1*(((float) j) + 0.5);
         part[1+idimp*j+k1] = at3;
      }
   }
/* maxwellian velocity distribution */
   for (j = 0; j < npxy; j++) {
      part[2+idimp*j] = vtx*ranorm();
      part[3+idimp*j] = vty*ranorm();
      part[4+idimp*j] = vtz*ranorm();
   }
/* add correct drift: accumulate the sampled mean in double precision,
   then shift all velocities so the mean equals (vdx,vdy,vdz) */
   dsum1 = 0.0;
   dsum2 = 0.0;
   dsum3 = 0.0;
   for (j = 0; j < npxy; j++) {
      dsum1 += part[2+idimp*j];
      dsum2 += part[3+idimp*j];
      dsum3 += part[4+idimp*j];
   }
   sum1 = dsum1;
   sum2 = dsum2;
   sum3 = dsum3;
   at1 = 1.0/(float) npxy;
   sum1 = at1*sum1 - vdx;
   sum2 = at1*sum2 - vdy;
   sum3 = at1*sum3 - vdz;
   for (j = 0; j < npxy; j++) {
      part[2+idimp*j] -= sum1;
      part[3+idimp*j] -= sum2;
      part[4+idimp*j] -= sum3;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cdblkp2l(float part[], int kpic[], int *nppmx, int idimp, int nop,
              int mx, int my, int mx1, int mxy1, int *irc) {
/* this subroutine finds the maximum number of particles in each tile of
   mx, my to calculate size of segmented particle array ppart
   linear interpolation
   part = input particle array
   part[n][0] = position x of particle n
   part[n][1] = position y of particle n
   kpic = output number of particles per tile
   nppmx = return maximum number of particles in tile
   idimp = size of phase space = 4
   nop = number of particles
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int j, k, n, m, isum, ist, npx, ierr;
   ierr = 0;
/* clear counter array */
   for (k = 0; k < mxy1; k++) {
      kpic[k] = 0;
   }
/* find how many particles in each tile: positions are truncated to
   ints, then mapped to a tile index n + mx1*m */
   for (j = 0; j < nop; j++) {
      n = part[idimp*j];
      m = part[1+idimp*j];
      n = n/mx;
      m = m/my;
      m = n + mx1*m;
      if (m < mxy1) {
         kpic[m] += 1;
      }
      else {
/* tile index out of range: remember the worst overflow */
         ierr = ierr > (m - mxy1 + 1) ? ierr : (m - mxy1 + 1);
      }
   }
/* find maximum occupancy over all tiles */
   isum = 0;
   npx = 0;
   for (k = 0; k < mxy1; k++) {
      ist = kpic[k];
      npx = npx > ist ?
npx : ist;
      isum += ist;
   }
   *nppmx = npx;
/* check for errors: irc > 0 means tile-index overflow; irc = -1 means
   the per-tile counts do not sum to nop (inconsistent input) */
   if (ierr > 0) {
      *irc = ierr;
   }
   else if (isum != nop) {
      *irc = -1;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cppmovin2l(float part[], float ppart[], int kpic[], int nppmx,
                int idimp, int nop, int mx, int my, int mx1, int mxy1,
                int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my and
   copies to segmented array ppart
   linear interpolation
   input: all except ppart, kpic, output: ppart, kpic
   part/ppart = input/output particle arrays
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = velocity vx of particle n in tile m
   ppart[m][n][3] = velocity vy of particle n in tile m
   kpic = output number of particles per tile
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   nop = number of particles
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int i, j, k, n, m, ip, ierr;
   ierr = 0;
/* clear counter array */
   for (k = 0; k < mxy1; k++) {
      kpic[k] = 0;
   }
/* find addresses of particles at each tile and reorder particles */
   for (j = 0; j < nop; j++) {
      n = part[idimp*j];
      m = part[1+idimp*j];
      n = n/mx;
      m = m/my;
      m = n + mx1*m;
      ip = kpic[m];
      if (ip < nppmx) {
         for (i = 0; i < idimp; i++) {
            ppart[i+idimp*(ip+nppmx*m)] = part[i+idimp*j];
         }
      }
      else {
/* tile full: record the worst overflow; note the counter is still
   incremented below so the overflow amount keeps growing */
         ierr = ierr > ip-nppmx+1 ?
ierr : ip-nppmx+1;
      }
      kpic[m] = ip + 1;
   }
   if (ierr > 0)
      *irc = ierr;
   return;
}

/*--------------------------------------------------------------------*/
void cppcheck2l(float ppart[], int kpic[], int idimp, int nppmx, int nx,
                int ny, int mx, int my, int mx1, int my1, int *irc) {
/* this subroutine performs a sanity check to make sure particles sorted
   by x,y grid in tiles of mx, my, are all within bounds.
   tiles are assumed to be arranged in 2D linear memory
   input: all except irc
   output: irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   kpic[k] = number of reordered output particles in tile k
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   irc = particle error, returned only if error occurs, when irc > 0
local data */
   int mxy1, noff, moff, npp, j, k, ist, nn, mm;
   float edgelx, edgely, edgerx, edgery, dx, dy;
   mxy1 = mx1*my1;
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,nn,mm,ist,edgelx,edgely,edgerx,edgery,dx,dy)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = lower-left grid corner of tile k; edge tiles may be
   narrower than mx x my */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ?
my : mm;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
         dx = ppart[idimp*(j+nppmx*k)];
         dy = ppart[1+idimp*(j+nppmx*k)];
/* find particles going out of bounds */
         ist = 0;
         if (dx < edgelx)
            ist = 1;
         if (dx >= edgerx)
            ist = 2;
         if (dy < edgely)
            ist += 3;
         if (dy >= edgery)
            ist += 6;
/* NOTE(review): *irc is written from multiple OpenMP threads without
   synchronization; it is only an error flag (last writer wins), so the
   reported tile number k+1 is one of possibly several offenders */
         if (ist > 0)
            *irc = k + 1;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void cgbppush23l(float ppart[], float fxy[], float bxy[], int kpic[],
                 float qbm, float dt, float dtc, float *ek, int idimp,
                 int nppmx, int nx, int ny, int mx, int my, int nxv,
                 int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with magnetic field. Using the Boris Mover.
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   119 flops/particle, 1 divide, 29 loads, 5 stores
   input: all, output: ppart, ek
   velocity equations used are:
   vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] =
   (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
   omz = (q/m)*bz(x(t),y(t)).
   position equations used are:
   x(t+dt)=x(t) + vx(t+dt/2)*dt
   y(t+dt)=y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m))
      + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x velocity of particle n in tile m
   ppart[m][n][3] = y velocity of particle n in tile m
   ppart[m][n][4] = z velocity of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic = number of particles per tile
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
   (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
   (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, nn, mm, nm;
   float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
   float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float x, y;
/* NOTE(review): fixed-size tile buffers require mx < MXV and my < MYV;
   the runtime guard below is commented out, so the caller must ensure
   this */
   float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv3 = 3*(mx + 1);
   qtmh = 0.5*qbm*dt;
   sum2 = 0.0;
/* set boundary values */
   edgelx = 0.0;
   edgely = 0.0;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,x,y,dxp,dyp,amx,amy,dx,dy, \
dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4, \
rot5,rot6,rot7,rot8,rot9,sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = lower-left grid corner of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* load local fields from global array (tile plus one guard column/row) */
      nn = (mx < nx-noff ? mx : nx-noff) + 1;
      mm = (my < ny-moff ?
my : ny-moff) + 1;
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
            sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
            sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
            sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
            sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights (positions truncated to grid indices) */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nm = 3*(nn - noff) + mxv3*(mm - moff);
         amx = 1.0 - dxp;
         amy = 1.0 - dyp;
/* find electric field by bilinear interpolation from local tile */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += mxv3;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field by the same interpolation */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += mxv3;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[2+idimp*(j+npoff)] + dx;
         acy = ppart[3+idimp*(j+npoff)] + dy;
         acz = ppart[4+idimp*(j+npoff)] + dz;
/* time-centered kinetic energy */
         sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
         omxt = qtmh*ox;
         omyt = qtmh*oy;
         omzt = qtmh*oz;
/* calculate rotation matrix (Boris rotation) */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0/(1.0 + omt);
         omt = 0.5*(1.0 - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new velocity */
         dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
         dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
         dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
         ppart[4+idimp*(j+npoff)] = dz;
/* new position */
         dx = x + dx*dtc;
         dy = y + dy*dtc;
/* reflecting boundary conditions: restore position, reverse velocity */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = ppart[1+idimp*(j+npoff)];
               ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
      }
      sum2 += sum1;
   }
/* normalize kinetic energy */
   *ek += 0.5*sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cgbppushf23l(float ppart[], float fxy[], float bxy[], int kpic[],
                  int ncl[], int ihole[], float qbm, float dt, float dtc,
                  float *ek, int idimp, int nppmx, int nx, int ny,
                  int mx, int my, int nxv, int nyv, int mx1, int mxy1,
                  int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with magnetic field. Using the Boris Mover.
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   119 flops/particle, 1 divide, 29 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   velocity equations used are:
   vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
   omz = (q/m)*bz(x(t),y(t)).
   position equations used are:
   x(t+dt)=x(t) + vx(t+dt/2)*dt
   y(t+dt)=y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m))
      + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x velocity of particle n in tile m
   ppart[m][n][3] = y velocity of particle n in tile m
   ppart[m][n][4] = z velocity of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
   (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
   (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, ih, nh, nn, mm, nm;
   float qtmh, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
   float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
   float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float x, y;
/* NOTE(review): fixed-size tile buffers require mx < MXV and my < MYV;
   the runtime guard below is commented out */
   float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv3 = 3*(mx + 1);
   qtmh = 0.5*qbm*dt;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,ih,nh,x,y,dxp,dyp,amx,amy, \
dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3, \
rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx,edgery,sum1,sfxy, \
sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = lower-left grid corner of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ?
my : mm;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
/* load local fields from global array (tile plus one guard column/row) */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
            sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
            sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
            sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
            sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nm = 3*(nn - noff) + mxv3*(mm - moff);
         amx = 1.0 - dxp;
         amy = 1.0 - dyp;
/* find electric field by bilinear interpolation from local tile */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += mxv3;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field by the same interpolation */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += mxv3;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[2+idimp*(j+npoff)] + dx;
         acy = ppart[3+idimp*(j+npoff)] + dy;
         acz = ppart[4+idimp*(j+npoff)] + dz;
/* time-centered kinetic energy */
         sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
         omxt = qtmh*ox;
         omyt = qtmh*oy;
         omzt = qtmh*oz;
/* calculate rotation matrix (Boris rotation) */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0/(1.0 + omt);
         omt = 0.5*(1.0 - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new velocity */
         dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
         dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
         dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
         ppart[4+idimp*(j+npoff)] = dz;
/* new position */
         dx = x + dx*dtc;
         dy = y + dy*dtc;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
/* after wrapping, dx == anx can occur from roundoff: clamp to 0 and
   treat the particle as staying */
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
/* ihole overflow: remember and report below */
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += 0.5*sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cgrbppush23l(float ppart[], float fxy[], float bxy[], int kpic[],
                  float qbm, float dt,
float dtc, float ci, float *ek, int idimp,
                  int nppmx, int nx, int ny, int mx, int my, int nxv,
                  int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, for relativistic particles with magnetic field
   Using the Boris Mover.
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all, output: ppart, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) +
      dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x momentum of particle n in tile m
   ppart[m][n][3] = y momentum of particle n in tile m
   ppart[m][n][4] = z momentum of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic = number of particles per tile
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   ci = reciprocal of velocity of light
   kinetic energy/mass at time t is also calculated, using
   ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
        (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
        (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, nn, mm, nm;
   float qtmh, ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float dx, dy, dz, ox, oy, oz, acx, acy, acz, p2, gami, qtmg, dtg;
   float omxt, omyt, omzt, omt, anorm;
   float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float x, y;
/* per-tile copies of the field arrays, sized for the largest tile */
   float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
   double sum1, sum2;
/* mxv3 = leading dimension (in floats) of the 3-component local tiles,
   as used by the sfxy/sbxy index expressions 3*i+mxv3*j below */
   mxv3 = 3*(mx + 1);
/* qtmh = half-step acceleration factor 0.5*(q/m)*dt */
   qtmh = 0.5*qbm*dt;
   ci2 = ci*ci;
   sum2 = 0.0;
/* set boundary values */
   edgelx = 0.0;
   edgely = 0.0;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,x,y,dxp,dyp,amx,amy,dx,dy, \
dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4, \
rot5,rot6,rot7,rot8,rot9,p2,gami,qtmg,dtg,sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* decode tile k into its grid offsets: noff/moff = lower-left corner */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* load local fields from global array */
/* nn/mm = clipped tile extent, plus one guard cell */
      nn = (mx < nx-noff ? mx : nx-noff) + 1;
      mm = (my < ny-moff ? my : ny-moff) + 1;
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
            sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
            sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
            sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
            sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = base index of the particle's cell in the local field tiles */
         nm = 3*(nn - noff) + mxv3*(mm - moff);
         amx = 1.0 - dxp;
         amy = 1.0 - dyp;
/* find electric field (bilinear interpolation over 4 grid points) */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += mxv3;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field (same interpolation stencil) */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += mxv3;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[2+idimp*(j+npoff)] + dx;
         acy = ppart[3+idimp*(j+npoff)] + dy;
         acz = ppart[4+idimp*(j+npoff)] + dz;
/* find inverse gamma from the half-accelerated momentum */
         p2 = acx*acx + acy*acy + acz*acz;
         gami = 1.0/sqrtf(1.0 + p2*ci2);
/* renormalize magnetic field */
         qtmg = qtmh*gami;
/* time-centered kinetic energy */
         sum1 += gami*p2/(1.0 + gami);
/* calculate cyclotron frequency */
         omxt = qtmg*ox;
         omyt =
qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0/(1.0 + omt); omt = 0.5*(1.0 - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ dx += (rot1*acx + rot2*acy + rot3*acz)*anorm; dy += (rot4*acx + rot5*acy + rot6*acz)*anorm; dz += (rot7*acx + rot8*acy + rot9*acz)*anorm; ppart[2+idimp*(j+npoff)] = dx; ppart[3+idimp*(j+npoff)] = dy; ppart[4+idimp*(j+npoff)] = dz; /* update inverse gamma */ p2 = dx*dx + dy*dy + dz*dz; dtg = dtc/sqrtf(1.0 + p2*ci2); /* new position */ dx = x + dx*dtg; dy = y + dy*dtg; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = ppart[idimp*(j+npoff)]; ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = ppart[1+idimp*(j+npoff)]; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = ppart[idimp*(j+npoff)]; ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)]; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; } sum2 += sum1; } /* normalize kinetic energy */ *ek += sum2; return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void cgrbppushf23l(float ppart[], float fxy[], float bxy[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ntmax, int *irc) { /* for 2-1/2d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, for relativistic particles with magnetic field with periodic 
   boundary conditions. Using the Boris Mover.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) +
      dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x momentum of particle n in tile m
   ppart[m][n][3] = y momentum of particle n in tile m
   ppart[m][n][4] = z momentum of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   ci = reciprocal of velocity of light
   kinetic energy/mass at time t is also calculated, using
   ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
        (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
        (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, ih, nh, nn, mm, nm;
   float qtmh, ci2, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
   float acx, acy, acz, p2, gami, qtmg, dtg, omxt, omyt, omzt, omt;
   float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float x, y;
/* per-tile copies of the field arrays, sized for the largest tile */
   float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
   double sum1, sum2;
/* mxv3 = leading dimension (in floats) of the 3-component local tiles */
   mxv3 = 3*(mx + 1);
/* qtmh = half-step acceleration factor 0.5*(q/m)*dt */
   qtmh = 0.5*qbm*dt;
   ci2 = ci*ci;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,ih,nh,x,y,dxp,dyp,amx,amy, \
dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3, \
rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx,edgery,p2,gami, \
qtmg,dtg,sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* decode tile k into its grid offsets: noff/moff = lower-left corner */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* nn/mm = clipped tile extent */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* this tile's edges (in grid units), used for exit detection below */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
/* add one guard cell for the field loads */
      nn += 1;
      mm += 1;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
            sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
            sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
            sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
            sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = base index of the particle's cell in the local field tiles */
         nm = 3*(nn - noff) + mxv3*(mm - moff);
         amx = 1.0 - dxp;
         amy = 1.0 - dyp;
/* find electric field (bilinear interpolation over 4 grid points) */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += mxv3;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field (same interpolation stencil) */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += mxv3;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[2+idimp*(j+npoff)] + dx;
         acy = ppart[3+idimp*(j+npoff)] + dy;
         acz = ppart[4+idimp*(j+npoff)] + dz;
/* find inverse gamma from the half-accelerated momentum */
         p2 = acx*acx + acy*acy + acz*acz;
         gami = 1.0/sqrtf(1.0 + p2*ci2);
/* renormalize magnetic field */
         qtmg = qtmh*gami;
/* time-centered kinetic energy */
         sum1 += gami*p2/(1.0 + gami);
/* calculate cyclotron frequency */
         omxt = qtmg*ox;
         omyt = qtmg*oy;
         omzt = qtmg*oz;
/* calculate rotation matrix */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0/(1.0 + omt);
         omt = 0.5*(1.0 - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new momentum */
         dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
         dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
         dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
         ppart[4+idimp*(j+npoff)] = dz;
/* update inverse gamma */
         p2 = dx*dx + dy*dy + dz*dz;
         dtg = dtc/sqrtf(1.0 + p2*ci2);
/* new position */
         dx = x + dx*dtg;
         dy = y + dy*dtg;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy
*/
   *ek += sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cgppost2l(float ppart[], float q[], int kpic[], float qm,
               int nppmx, int idimp, int mx, int my, int nxv, int nyv,
               int mx1, int mxy1) {
/* for 2d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m)=qm*(1.-dx)*(1.-dy)
   q(n+1,m)=qm*dx*(1.-dy)
   q(n,m+1)=qm*(1.-dx)*dy
   q(n+1,m+1)=qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   q[k][j] = charge density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv;
   int i, j, k, nn, mm;
   float x, y, dxp, dyp, amx, amy;
/* per-tile charge accumulator, sized for the largest tile */
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
/* mxv = leading dimension of the local accumulator */
   mxv = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,sq)
   for (k = 0; k < mxy1; k++) {
/* decode tile k into its grid offsets: noff/moff = lower-left corner */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < mxv*(my+1); j++) {
         sq[j] = 0.0f;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = nn - noff + mxv*(mm - moff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit charge within tile to local accumulator */
         x = sq[nn] + amx*amy;
         y = sq[nn+1] + dxp*amy;
         sq[nn] = x;
         sq[nn+1] = y;
         nn += mxv;
         x = sq[nn] + amx*dyp;
         y = sq[nn+1] + dxp*dyp;
         sq[nn] = x;
         sq[nn+1] = y;
      }
/* deposit charge to interior points in global array */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            q[i+noff+nxv*(j+moff)] += sq[i+mxv*j];
         }
      }
/* deposit charge to edge points in global array */
/* edge rows/columns are shared with neighboring tiles, so updates
   there must be atomic */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff] += sq[i];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)] += sq[i+mxv*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)] += sq[mxv*j];
         if (nn > mx) {
#pragma omp atomic
            q[nn+noff-1+nxv*(j+moff)] += sq[nn-1+mxv*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cgjppost2l(float ppart[], float cu[], int kpic[], float qm,
                float dt, int nppmx, int idimp, int nx, int ny, int mx,
                int my, int nxv, int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   41 flops/particle, 17 loads, 14 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*vi,
   where i = x,y,z
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x velocity of particle n in tile m
   ppart[m][n][3] = y velocity of particle n in tile m
   ppart[m][n][4] = z velocity of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of current array, must be >= nx+1
   nyv = second dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, nn, mm;
   float edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz;
/* per-tile current accumulator, sized for the largest tile */
   float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
/* mxv3 = leading dimension (in floats) of the 3-component accumulator */
   mxv3 = 3*(mx + 1);
/* set boundary values */
   edgelx = 0.0;
   edgely = 0.0;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy,vx, \
vy,vz,scu)
   for (k = 0; k < mxy1; k++) {
/* decode tile k into its grid offsets: noff/moff = lower-left corner */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < mxv3*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = 3*(nn - noff) + mxv3*(mm - moff);
         amx = qm - dxp;
         amy = 1.0 - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         vz = ppart[4+idimp*(j+npoff)];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += mxv3;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = ppart[1+idimp*(j+npoff)];
               ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
      }
/* deposit current to interior points in global array */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j];
            cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j];
            cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j];
         }
      }
/* deposit current to edge points in global array */
      mm = nyv - moff;
      mm = my+1 < mm ?
my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff)] += scu[3*i]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)]; } } nn = nxv - noff; nn = mx+1 < nn ? mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j]; if (nn > mx) { #pragma omp atomic cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j]; #pragma omp atomic cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j]; #pragma omp atomic cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j]; } } } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void cgjppostf2l(float ppart[], float cu[], int kpic[], int ncl[], int ihole[], float qm, float dt, int nppmx, int idimp, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ntmax, int *irc) { /* for 2-1/2d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step with periodic boundary conditions. 
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   41 flops/particle, 17 loads, 14 stores
   input: all except ncl, ihole, irc, output: ppart, cu, ncl, ihole, irc
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*vi, where i = x,y,z
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x velocity of particle n in tile m
   ppart[m][n][3] = y velocity of particle n in tile m
   ppart[m][n][4] = z velocity of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of current array, must be >= nx+1
   nyv = second dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp;
   int i, j, k, ih, nh, nn, mm, mxv3;
   float dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz;
   float anx, any, edgelx, edgely, edgerx, edgery;
/* per-tile current accumulator, sized for the largest tile */
   float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
/* mxv3 = leading dimension (in floats) of the 3-component accumulator */
   mxv3 = 3*(mx + 1);
   anx = (float) nx;
   any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy,dx, \
dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,scu)
   for (k = 0; k < mxy1; k++) {
/* decode tile k into its grid offsets: noff/moff = lower-left corner */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* nn/mm = clipped tile extent */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* this tile's edges (in grid units), used for exit detection below */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
/* add one guard cell */
      nn += 1;
      mm += 1;
/* zero out local accumulator */
      for (j = 0; j < mxv3*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = 3*(nn - noff) + mxv3*(mm - moff);
         amx = qm - dxp;
         amy = 1.0 - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         vz = ppart[4+idimp*(j+npoff)];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += mxv3;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
/* deposit current to interior points in global array */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j];
            cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j];
            cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j];
         }
      }
/* deposit current to edge points in global array */
/* edge rows/columns are shared with neighboring tiles, so updates
   there must be atomic */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[3*(i+noff+nxv*moff)] += scu[3*i];
#pragma omp atomic
         cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i];
#pragma omp atomic
         cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i];
         if (mm > my) {
#pragma omp atomic
            cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ?
mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j]; if (nn > mx) { #pragma omp atomic cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j]; #pragma omp atomic cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j]; #pragma omp atomic cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j]; } } /* set error and end of file flag */ /* ihole overflow */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void cgrjppost2l(float ppart[], float cu[], int kpic[], float qm, float dt, float ci, int nppmx, int idimp, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ipbc) { /* for 2-1/2d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step OpenMP version using guard cells data deposited in tiles particles stored segmented array 47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores input: all, output: ppart, cu current density is approximated by values at the nearest grid points cu(i,n,m)=qci*(1.-dx)*(1.-dy) cu(i,n+1,m)=qci*dx*(1.-dy) cu(i,n,m+1)=qci*(1.-dx)*dy cu(i,n+1,m+1)=qci*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][n][0] = position x of particle n in tile m ppart[m][n][1] = position y of particle n in tile m ppart[m][n][2] = x momentum of particle n in tile m ppart[m][n][3] = y momentum of particle n in tile m ppart[m][n][4] = z momentum of particle n in tile m cu[k][j][i] = ith component of current density at grid point j,k kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive 
calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 5 nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of current array, must be >= nx+1 nyv = second dimension of current array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,2d periodic,2d reflecting,mixed reflecting/periodic) local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv3; int i, j, k, nn, mm; float ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy; float x, y, dx, dy, vx, vy, vz, p2, gami; float scu[3*MXV*MYV]; /* float scu[3*(mx+1)*(my+1)]; */ mxv3 = 3*(mx + 1); ci2 = ci*ci; /* set boundary values */ edgelx = 0.0; edgely = 0.0; edgerx = (float) nx; edgery = (float) ny; if (ipbc==2) { edgelx = 1.0; edgely = 1.0; edgerx = (float) (nx-1); edgery = (float) (ny-1); } else if (ipbc==3) { edgelx = 1.0; edgerx = (float) (nx-1); } /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy,vx, \ vy,vz,p2,gami,scu) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; npoff = nppmx*k; /* zero out local accumulator */ for (j = 0; j < mxv3*(my+1); j++) { scu[j] = 0.0f; } /* loop over particles in tile */ for (j = 0; j < npp; j++) { /* find interpolation weights */ x = ppart[idimp*(j+npoff)]; y = ppart[1+idimp*(j+npoff)]; nn = x; mm = y; dxp = qm*(x - (float) nn); dyp = y - (float) mm; /* find inverse gamma */ vx = ppart[2+idimp*(j+npoff)]; vy = ppart[3+idimp*(j+npoff)]; vz = ppart[4+idimp*(j+npoff)]; p2 = vx*vx + vy*vy + vz*vz; gami = 1.0/sqrtf(1.0 + p2*ci2); /* calculate weights */ nn = 3*(nn - noff) + mxv3*(mm - moff); amx = qm - 
dxp; amy = 1.0 - dyp; /* deposit current */ dx = amx*amy; dy = dxp*amy; vx *= gami; vy *= gami; vz *= gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = amx*dyp; mm = nn + 3; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; dy = dxp*dyp; nn += mxv3; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; mm = nn + 3; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = ppart[idimp*(j+npoff)]; ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)]; } if ((dy < edgely) || (dy >= edgery)) { dy = ppart[1+idimp*(j+npoff)]; ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)]; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = ppart[idimp*(j+npoff)]; ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)]; } } /* set new position */ ppart[idimp*(j+npoff)] = dx; ppart[1+idimp*(j+npoff)] = dy; } /* deposit current to interior points in global array */ nn = nxv - noff; mm = nyv - moff; nn = mx < nn ? mx : nn; mm = my < mm ? my : mm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j]; cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j]; cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j]; } } /* deposit current to edge points in global array */ mm = nyv - moff; mm = my+1 < mm ? 
my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic cu[3*(i+noff+nxv*moff)] += scu[3*i]; #pragma omp atomic cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i]; #pragma omp atomic cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i]; if (mm > my) { #pragma omp atomic cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)]; #pragma omp atomic cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)]; #pragma omp atomic cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)]; } } nn = nxv - noff; nn = mx+1 < nn ? mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j]; #pragma omp atomic cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j]; #pragma omp atomic cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j]; if (nn > mx) { #pragma omp atomic cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j]; #pragma omp atomic cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j]; #pragma omp atomic cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j]; } } } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void cgrjppostf2l(float ppart[], float cu[], int kpic[], int ncl[], int ihole[], float qm, float dt, float ci, int nppmx, int idimp, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ntmax, int *irc) { /* for 2-1/2d code, this subroutine calculates particle current density using first-order linear interpolation for relativistic particles in addition, particle positions are advanced a half time-step with periodic boundary conditions. 
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
   input: all except ncl, ihole, irc, output: ppart, cu, ncl, ihole, irc
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*pi*gami, where i = x,y,z
   where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x momentum of particle n in tile m
   ppart[m][n][3] = y momentum of particle n in tile m
   ppart[m][n][4] = z momentum of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   ci = reciprocal of velocity of light
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of current array, must be >= nx+1
   nyv = second dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data                                                            */
#define MXV             33
#define MYV             33
   int noff, moff, npoff, npp;
   int i, j, k, ih, nh, nn, mm, mxv3;
   float ci2, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz, p2, gami;
   float anx, any, edgelx, edgely, edgerx, edgery;
/* scu = per-tile current accumulator, sized for one tile plus guard cells */
   float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
   mxv3 = 3*(mx + 1);
   ci2 = ci*ci;
   anx = (float) nx;
   any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV))  */
/*    return;                       */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy,dx, \
dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,p2,gami,scu)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of lower-left corner of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* nn/mm = actual tile extent, clipped at the system edge */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
/* zero out local accumulator */
      for (j = 0; j < mxv3*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
/* find inverse gamma */
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         vz = ppart[4+idimp*(j+npoff)];
         p2 = vx*vx + vy*vy + vz*vz;
         gami = 1.0/sqrtf(1.0 + p2*ci2);
/* calculate weights */
         nn = 3*(nn - noff) + mxv3*(mm - moff);
         amx = qm - dxp;
         amy = 1.0 - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
/* convert momenta to velocities */
         vx *= gami;
         vy *= gami;
         vz *= gami;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += mxv3;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
/* deposit current to interior points in global array */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j];
            cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j];
            cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j];
         }
      }
/* deposit current to edge points in global array */
/* edge rows/columns are shared with neighboring tiles, so these */
/* updates use atomics                                           */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[3*(i+noff+nxv*moff)] += scu[3*i];
#pragma omp atomic
         cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i];
#pragma omp atomic
         cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i];
         if (mm > my) {
#pragma omp atomic
            cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j];
#pragma omp atomic
         cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j];
#pragma omp atomic
         cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j];
         if (nn > mx) {
#pragma omp atomic
            cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j];
         }
      }
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void cpporder2l(float ppart[], float ppbuff[], int kpic[], int ncl[],
                int ihole[], int idimp, int nppmx, int nx, int ny,
                int mx, int my, int mx1, int my1, int npbmx, int ntmax,
                int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   algorithm has 3 steps.  first, one finds particles leaving tile and
   stores their number in each directon, location, and destination in ncl
   and ihole.  second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order.  finally, we copy
   the incoming particles from other tiles into ppart.
   input: all except ppbuff, ncl, ihole, irc
   output: ppart, ppbuff, kpic, ncl, ihole, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data                                                            */
   int mxy1, noff, moff, npp, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, isum;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
   int ks[8];
   mxy1 = mx1*my1;
   anx = (float) nx;
   any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely,edgerx, \
edgery)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of lower-left corner of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
/* nn/mm = actual tile extent, clipped at the system edge */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      ih = 0;
      nh = 0;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
         dx = ppart[idimp*(j+nppmx*k)];
         dy = ppart[1+idimp*(j+nppmx*k)];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going                             */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[idimp*(j+nppmx*k)] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[idimp*(j+nppmx*k)] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[1+idimp*(j+nppmx*k)] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[1+idimp*(j+nppmx*k)] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (ist > 0) {
            ncl[ist+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxy1; k++) {
/* find address offset for ordered ppbuff array */
/* exclusive prefix scan of ncl gives the start of each direction */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[i+idimp*(ii+npbmx*k)]
               = ppart[i+idimp*(j1+nppmx*k)];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,ist,j1,j2,ip,ks)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
         for (j = 0; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j1+nppmx*k)]
                  = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
      if (ih < nh) {
         ip = nh - ih;
         for (j = 0; j < ip; j++) {
            j1 = npp - j - 1;
            j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1;
            if (j1 > j2) {
/* move particle only if it is below current hole */
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j2+nppmx*k)]
                  = ppart[i+idimp*(j1+nppmx*k)];
               }
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cpporderf2l(float ppart[], float ppbuff[], int kpic[], int ncl[],
                 int ihole[], int idimp, int nppmx, int mx1, int my1,
                 int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   the algorithm has 2 steps.  first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   cgppushf2l procedure.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data                                                            */
   int mxy1, npp, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, isum;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   int ks[8];
   mxy1 = mx1*my1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxy1; k++) {
/* find address offset
   for ordered ppbuff array */
/* exclusive prefix scan of ncl gives the start of each direction */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[i+idimp*(ii+npbmx*k)]
               = ppart[i+idimp*(j1+nppmx*k)];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,ist,j1,j2,ip,ks)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
         for (j = 0; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j1+nppmx*k)]
                  = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
      if (ih < nh) {
         ip = nh - ih;
         for (j = 0; j < ip; j++) {
            j1 = npp - j - 1;
            j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1;
            if (j1 > j2) {
/* move particle only if it is below current hole */
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j2+nppmx*k)]
                  = ppart[i+idimp*(j1+nppmx*k)];
               }
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cbguard2l(float bxy[], int nx, int ny, int nxe, int nye) {
/* replicate extended periodic vector field bxy
   linear interpolation
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data                                                 */
   int j, k;
/* copy edges of extended field */
   for (k = 0; k < ny; k++) {
      bxy[3*nx+3*nxe*k] = bxy[3*nxe*k];
      bxy[1+3*nx+3*nxe*k] = bxy[1+3*nxe*k];
      bxy[2+3*nx+3*nxe*k] = bxy[2+3*nxe*k];
   }
   for (j = 0; j < nx; j++) {
      bxy[3*j+3*nxe*ny] = bxy[3*j];
      bxy[1+3*j+3*nxe*ny] = bxy[1+3*j];
      bxy[2+3*j+3*nxe*ny] = bxy[2+3*j];
   }
/* corner point */
   bxy[3*nx+3*nxe*ny] = bxy[0];
   bxy[1+3*nx+3*nxe*ny] = bxy[1];
   bxy[2+3*nx+3*nxe*ny] = bxy[2];
   return;
}

/*--------------------------------------------------------------------*/
void cacguard2l(float cu[], int nx, int ny, int nxe, int nye) {
/* accumulate extended periodic vector field cu
   linear interpolation
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data                                                 */
   int j, k;
/* accumulate edges of extended field */
   for (k = 0; k < ny; k++) {
      cu[3*nxe*k] += cu[3*nx+3*nxe*k];
      cu[1+3*nxe*k] += cu[1+3*nx+3*nxe*k];
      cu[2+3*nxe*k] += cu[2+3*nx+3*nxe*k];
      cu[3*nx+3*nxe*k] = 0.0;
      cu[1+3*nx+3*nxe*k] = 0.0;
      cu[2+3*nx+3*nxe*k] = 0.0;
   }
   for (j = 0; j < nx; j++) {
      cu[3*j] += cu[3*j+3*nxe*ny];
      cu[1+3*j] += cu[1+3*j+3*nxe*ny];
      cu[2+3*j] += cu[2+3*j+3*nxe*ny];
      cu[3*j+3*nxe*ny] = 0.0;
      cu[1+3*j+3*nxe*ny] = 0.0;
      cu[2+3*j+3*nxe*ny] = 0.0;
   }
/* corner point */
   cu[0] += cu[3*nx+3*nxe*ny];
   cu[1] += cu[1+3*nx+3*nxe*ny];
   cu[2] += cu[2+3*nx+3*nxe*ny];
   cu[3*nx+3*nxe*ny] = 0.0;
   cu[1+3*nx+3*nxe*ny] = 0.0;
   cu[2+3*nx+3*nxe*ny] = 0.0;
   return;
}

/*--------------------------------------------------------------------*/
void caguard2l(float q[], int nx, int ny, int nxe, int nye) {
/* accumulate extended periodic scalar field q
   linear interpolation
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data                                                 */
   int j, k;
/* accumulate edges of extended field */
   for (k = 0; k < ny; k++) {
      q[nxe*k] += q[nx+nxe*k];
      q[nx+nxe*k] = 0.0;
   }
   for (j = 0; j < nx; j++) {
      q[j] += q[j+nxe*ny];
      q[j+nxe*ny] = 0.0;
   }
/* corner point */
   q[0] += q[nx+nxe*ny];
   q[nx+nxe*ny] = 0.0;
   return;
}

/*--------------------------------------------------------------------*/
void cmpois23(float complex q[], float complex fxy[], int isign,
              float complex ffc[], float ax, float ay, float affp,
              float *we, int nx, int ny, int nxvh, int nyv, int nxhd,
              int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
   force/charge (or convolution of electric field over particle shape)
   with periodic boundary conditions.  Zeros out z component.
for isign = 0, input: isign,ax,ay,affp,nx,ny,nxvh,nyhd, output: ffc for isign /= 0, input: q,ffc,isign,nx,ny,nxvh,nyhd, output: fxy,we approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc) where nxc = nx/2 - 1, nyc = ny/2 - 1 equation used is: fx[ky][kx] = -sqrt(-1)*kx*g[ky][kx]*s[ky][kx]*q[ky][kx], fy[ky][kx] = -sqrt(-1)*ky*g[ky][kx]*s[ky][kx]*q[ky][kx], fz[ky][kx] = zero, where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx], s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0. q[k][j] = complex charge density for fourier mode (j,k) fxy[k][j][0] = x component of complex force/charge, fxy[k][j][1] = y component of complex force/charge, fxy[k][j][2] = zero, all for fourier mode (j,k) if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated cimag(ffc[k][j]) = finite-size particle shape factor s for fourier mode (j,k) creal(ffc[k][j]) = potential green's function g for fourier mode (j,k) ax/ay = half-width of particle in x/y direction affp = normalization constant = nx*ny/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*sum((affp/(kx**2+ky**2))*|q[ky][kx]*s[ky][kx]|**2) nx/ny = system length in x/y direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh local data */ int nxh, nyh, j, k, k1, kk, kj; float dnx, dny, dkx, dky, at1, at2, at3, at4; float complex zero, zt1, zt2; double wp, sum1; nxh = nx/2; nyh = 1 > ny/2 ? 
1 : ny/2; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; zero = 0.0 + 0.0*_Complex_I; if (isign != 0) goto L30; /* prepare form factor array */ for (k = 0; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; at1 = dky*dky; at2 = pow((dky*ay),2); for (j = 0; j < nxh; j++) { dkx = dnx*(float) j; at3 = dkx*dkx + at1; at4 = exp(-0.5*(pow((dkx*ax),2) + at2)); if (at3==0.0) { ffc[j+kk] = affp + 1.0*_Complex_I; } else { ffc[j+kk] = (affp*at4/at3) + at4*_Complex_I; } } } return; /* calculate force/charge and sum field energy */ L30: sum1 = 0.0; /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \ reduction(+:sum1) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; fxy[3*j+3*kj] = at2*zt1; fxy[1+3*j+3*kj] = at3*zt1; fxy[2+3*j+3*kj] = zero; fxy[3*j+3*k1] = at2*zt2; fxy[1+3*j+3*k1] = -at3*zt2; fxy[2+3*j+3*k1] = zero; wp += at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); } /* mode numbers kx = 0, nx/2 */ at1 = crealf(ffc[kk])*cimagf(ffc[kk]); at3 = at1*dny*(float) k; zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I; fxy[3*kj] = zero; fxy[1+3*kj] = at3*zt1; fxy[2+3*kj] = zero; fxy[3*k1] = zero; fxy[1+3*k1] = zero; fxy[2+3*k1] = zero; wp += at1*(q[kj]*conjf(q[kj])); sum1 += wp; } wp = 0.0; /* mode numbers ky = 0, ny/2 */ k1 = 3*nxvh*nyh; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j])*cimagf(ffc[j]); at2 = at1*dnx*(float) j; zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; fxy[3*j] = at2*zt1; fxy[1+3*j] = zero; fxy[2+3*j] = zero; fxy[3*j+k1] = zero; fxy[1+3*j+k1] = zero; fxy[2+3*j+k1] = zero; wp += at1*(q[j]*conjf(q[j])); } fxy[0] = zero; fxy[1] = zero; fxy[2] = zero; fxy[k1] = zero; fxy[1+k1] = zero; fxy[2+k1] = 
zero; sum1 += wp; *we = sum1*(float) (nx*ny); return; } /*--------------------------------------------------------------------*/ void cmcuperp2(float complex cu[], int nx, int ny, int nxvh, int nyv) { /* this subroutine calculates the transverse current in fourier space input: all, output: cu approximate flop count is: 36*nxc*nyc and nxc*nyc divides where nxc = nx/2 - 1, nyc = ny/2 - 1 the transverse current is calculated using the equation: cux[ky][kx] = cux[ky][kx] -kx*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky) cuy[ky][kx] = cuy[ky][kx] -ky*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky) where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, except for cux(kx=pi) = cuy(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = 0, and cux(kx=0,ky=0) = cuy(kx=0,ky=0) = 0. cu[k][j][i] = complex current density for fourier mode (j,k) nx/ny = system length in x/y direction nxvh = first dimension of current array, must be >= nxh nyv = second dimension of current array, must be >= ny local data */ int nxh, nyh, j, k, k1, kj; float dnx, dny, dkx, dky, dky2, at1; float complex zero, zt1; nxh = nx/2; nyh = 1 > ny/2 ? 
1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
/* calculate transverse part of current */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for private(j,k,k1,kj,dky,dky2,dkx,at1,zt1)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      dky2 = dky*dky;
      kj = nxvh*k;
/* k1 = offset of the conjugate (ky < 0) row stored at ny - k */
      k1 = nxvh*ny - kj;
      for (j = 1; j < nxh; j++) {
         dkx = dnx*(float) j;
/* at1 = 1/|k|**2; zt1 = (k.cu)/|k|**2 is the longitudinal amplitude */
         at1 = 1./(dkx*dkx + dky2);
         zt1 = at1*(dkx*cu[3*j+3*kj] + dky*cu[1+3*j+3*kj]);
/* subtract k*zt1 to leave only the divergence-free (transverse) part */
         cu[3*j+3*kj] -= dkx*zt1;
         cu[1+3*j+3*kj] -= dky*zt1;
/* same projection for the conjugate modes; ky enters with opposite sign */
         zt1 = at1*(dkx*cu[3*j+3*k1] - dky*cu[1+3*j+3*k1]);
         cu[3*j+3*k1] -= dkx*zt1;
         cu[1+3*j+3*k1] += dky*zt1;
      }
/* mode numbers kx = 0, nx/2 */
      cu[1+3*kj] = zero;
      cu[3*k1] = zero;
      cu[1+3*k1] = zero;
   }
/* mode numbers ky = 0, ny/2 */
   k1 = 3*nxvh*nyh;
   for (j = 1; j < nxh; j++) {
      cu[3*j] = zero;
      cu[3*j+k1] = zero;
      cu[1+3*j+k1] = zero;
   }
/* zero the (kx=0,ky=0) and (kx=0,ky=ny/2) corner modes */
   cu[0] = zero;
   cu[1] = zero;
   cu[k1] = zero;
   cu[1+k1] = zero;
   return;
}

/*--------------------------------------------------------------------*/
void cmibpois23(float complex cu[], float complex bxy[],
                float complex ffc[], float ci, float *wm, int nx,
                int ny, int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
   magnetic field, with periodic boundary conditions.
   input: cu,ffc,ci,nx,ny,nxv,nyhd, output: bxy,wm
   approximate flop count is: 90*nxc*nyc + 40*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   the magnetic field is calculated using the equations:
   bx[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*ky*cuz[ky][kx],
   by[ky][kx] = -ci*ci*sqrt(-1)*g[ky][kx]*kx*cuz[ky][kx],
   bz[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*(kx*cuy[ky][kx]-ky*cux[ky][kx]),
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx],
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2),
   except for bx(kx=pi) = by(kx=pi) = bz(kx=pi) = bx(ky=pi) = by(ky=pi)
   = bz(ky=pi) = 0, and bx(kx=0,ky=0) = by(kx=0,ky=0) = bz(kx=0,ky=0)
   = 0.
cu[k][j][i] = complex current density for fourier mode (j,k) bxy[k][j][i] = i component of complex magnetic field all for fourier mode (j,k) cimag(ffc[k][j]) = finite-size particle shape factor s for fourier mode (j,k) creal(ffc[k][j]) = potential green's function g for fourier mode (j,k) ci = reciprocal of velocity of light magnetic field energy is also calculated, using wm = nx*ny*sum((affp/(kx**2+ky**2))*ci*ci* |cu[ky][kx]*s[ky][kx]|**2), where affp = normalization constant = nx*ny/np, where np=number of particles this expression is valid only if the current is divergence-free nx/ny = system length in x/y direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh local data */ int nxh, nyh, j, k, k1, kk, kj; float dnx, dny, dky, ci2, at1, at2, at3; float complex zero, zt1, zt2, zt3; double wp, sum1; nxh = nx/2; nyh = 1 > ny/2 ? 
1 : ny/2; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; zero = 0.0 + 0.0*_Complex_I; ci2 = ci*ci; /* calculate magnetic field and sum field energy */ sum1 = 0.0; /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,zt3,wp) \ reduction(+:sum1) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; for (j = 1; j < nxh; j++) { at1 = ci2*crealf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; at1 = at1*cimagf(ffc[j+kk]); zt1 = -cimagf(cu[2+3*j+3*kj]) + crealf(cu[2+3*j+3*kj])*_Complex_I; zt2 = -cimagf(cu[1+3*j+3*kj]) + crealf(cu[1+3*j+3*kj])*_Complex_I; zt3 = -cimagf(cu[3*j+3*kj]) + crealf(cu[3*j+3*kj])*_Complex_I; bxy[3*j+3*kj] = at3*zt1; bxy[1+3*j+3*kj] = -at2*zt1; bxy[2+3*j+3*kj] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+3*j+3*k1]) + crealf(cu[2+3*j+3*k1])*_Complex_I; zt2 = -cimagf(cu[1+3*j+3*k1]) + crealf(cu[1+3*j+3*k1])*_Complex_I; zt3 = -cimagf(cu[3*j+3*k1]) + crealf(cu[3*j+3*k1])*_Complex_I; bxy[3*j+3*k1] = -at3*zt1; bxy[1+3*j+3*k1] = -at2*zt1; bxy[2+3*j+3*k1] = at2*zt2 + at3*zt3; wp += at1*(cu[3*j+3*kj]*conjf(cu[3*j+3*kj]) + cu[1+3*j+3*kj]*conjf(cu[1+3*j+3*kj]) + cu[2+3*j+3*kj]*conjf(cu[2+3*j+3*kj]) + cu[3*j+3*k1]*conjf(cu[3*j+3*k1]) + cu[1+3*j+3*k1]*conjf(cu[1+3*j+3*k1]) + cu[2+3*j+3*k1]*conjf(cu[2+3*j+3*k1])); } /* mode numbers kx = 0, nx/2 */ at1 = ci2*crealf(ffc[kk]); at3 = at1*dny*(float) k; at1 = at1*cimagf(ffc[kk]); zt1 = -cimagf(cu[2+3*kj]) + crealf(cu[2+3*kj])*_Complex_I; zt3 = -cimagf(cu[3*kj]) + crealf(cu[3*kj])*_Complex_I; bxy[3*kj] = at3*zt1; bxy[1+3*kj] = zero; bxy[2+3*kj] = -at3*zt3; bxy[3*k1] = zero; bxy[1+3*k1] = zero; bxy[2+3*k1] = zero; wp += at1*(cu[3*kj]*conjf(cu[3*kj]) + cu[1+3*kj]*conjf(cu[1+3*kj]) + cu[2+3*kj]*conjf(cu[2+3*kj])); sum1 += wp; } wp = 0.0; /* mode numbers ky = 0, ny/2 */ k1 = 3*nxvh*nyh; for (j = 1; j < nxh; j++) { at1 = ci2*crealf(ffc[j]); at2 = at1*dnx*(float) j; at1 = 
at1*cimagf(ffc[j]); zt1 = -cimagf(cu[2+3*j]) + crealf(cu[2+3*j])*_Complex_I; zt2 = -cimagf(cu[1+3*j]) + crealf(cu[1+3*j])*_Complex_I; bxy[3*j] = zero; bxy[1+3*j] = -at2*zt1; bxy[2+3*j] = at2*zt2; bxy[3*j+k1] = zero; bxy[1+3*j+k1] = zero; bxy[2+3*j+k1] = zero; wp += at1*(cu[3*j]*conjf(cu[3*j]) + cu[1+3*j]*conjf(cu[1+3*j]) + cu[2+3*j]*conjf(cu[2+3*j])); } bxy[0] = zero; bxy[1] = zero; bxy[2] = zero; bxy[k1] = zero; bxy[1+k1] = zero; bxy[2+k1] = zero; sum1 += wp; *wm = sum1*(float) (nx*ny); return; } /*--------------------------------------------------------------------*/ void cmmaxwel2(float complex exy[], float complex bxy[], float complex cu[], float complex ffc[], float ci, float dt, float *wf, float *wm, int nx, int ny, int nxvh, int nyv, int nxhd, int nyhd) { /* this subroutine solves 2-1/2d maxwell's equation in fourier space for transverse electric and magnetic fields with periodic boundary conditions input: all, output: wf, wm, exy, bxy approximate flop count is: 286*nxc*nyc + 84*(nxc + nyc) where nxc = nx/2 - 1, nyc = ny/2 - 1 the magnetic field is first updated half a step using the equations: bx[ky][kx] = bx[ky][kx] - .5*dt*sqrt(-1)*ky*ez[ky][kx] by[ky][kx] = by[ky][kx] + .5*dt*sqrt(-1)*kx*ez[ky][kx] bz[ky][kx] = bz[ky][kx] - .5*dt*sqrt(-1)*(kx*ey[ky][kx]-ky*ex[ky][kx]) the electric field is then updated a whole step using the equations: ex[ky][kx] = ex[ky][kx] + c2*dt*sqrt(-1)*ky*bz[ky][kx] - affp*dt*cux[ky][kx]*s[ky][kx] ey[ky][kx] = ey[ky][kx] - c2*dt*sqrt(-1)*kx*bz[ky][kx] - affp*dt*cuy[ky][kx]*s[ky][kx] ez[ky][kx] = ez[ky][kx] + c2*dt*sqrt(-1)*(kx*by[ky][kx]-ky*bx[ky][kx]) - affp*dt*cuz[ky][kx]*s[ky][kx] the magnetic field is finally updated the remaining half step with the new electric field and the previous magnetic field equations. 
where kx = 2pi*j/nx, ky = 2pi*k/ny, c2 = 1./(ci*ci) and s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2) j,k = fourier mode numbers, except for ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0, ex(ky=pi) = ey(ky=pi) = ex(ky=pi) = 0, ex(kx=0,ky=0) = ey(kx=0,ky=0) = ez(kx=0,ky=0) = 0. and similarly for bx, by, bz. cu[k][j][i] = complex current density exy[k][j][i] = complex transverse electric field bxy[k][j][i] = complex magnetic field for component i, all for fourier mode (j,k) creal(ffc[0][0]) = affp = normalization constant = nx*ny/np, where np=number of particles cimag(ffc[k][j]) = finite-size particle shape factor s. s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2) for fourier mode (j-1,k-1) ci = reciprocal of velocity of light dt = time interval between successive calculations transverse electric field energy is also calculated, using wf = nx*ny**sum((1/affp)*|exy[ky][kx]|**2) magnetic field energy is also calculated, using wm = nx*ny**sum((c2/affp)*|bxy[ky][kx]|**2) nx/ny = system length in x/y direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh local data */ int nxh, nyh, j, k, k1, kk, kj; float dnx, dny, dth, c2, cdt, affp, anorm, dkx, dky, afdt, adt; float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9; double wp, ws, sum1, sum2; if (ci <= 0.0) return; nxh = nx/2; nyh = 1 > ny/2 ? 
1 : ny/2; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dth = 0.5*dt; c2 = 1.0/(ci*ci); cdt = c2*dt; affp = creal(ffc[0]); adt = affp*dt; zero = 0.0 + 0.0*_Complex_I; anorm = 1.0/affp; /* update electromagnetic field and sum field energies */ sum1 = 0.0; sum2 = 0.0; /* calculate the electromagnetic fields */ /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,dkx,afdt,zt1,zt2,zt3,zt4,zt5,zt6,zt7,zt8, \ zt9,ws,wp) \ reduction(+:sum1,sum2) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; ws = 0.0; wp = 0.0; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+kk]); /* update magnetic field half time step, ky > 0 */ zt1 = -cimagf(exy[2+3*j+3*kj]) + crealf(exy[2+3*j+3*kj])*_Complex_I; zt2 = -cimagf(exy[1+3*j+3*kj]) + crealf(exy[1+3*j+3*kj])*_Complex_I; zt3 = -cimagf(exy[3*j+3*kj]) + crealf(exy[3*j+3*kj])*_Complex_I; zt4 = bxy[3*j+3*kj] - dth*(dky*zt1); zt5 = bxy[1+3*j+3*kj] + dth*(dkx*zt1); zt6 = bxy[2+3*j+3*kj] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exy[3*j+3*kj] + cdt*(dky*zt1) - afdt*cu[3*j+3*kj]; zt8 = exy[1+3*j+3*kj] - cdt*(dkx*zt1) - afdt*cu[1+3*j+3*kj]; zt9 = exy[2+3*j+3*kj] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+3*j+3*kj]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exy[3*j+3*kj] = zt7; exy[1+3*j+3*kj] = zt8; exy[2+3*j+3*kj] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxy[3*j+3*kj] = zt4; bxy[1+3*j+3*kj] = zt5; bxy[2+3*j+3*kj] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); 
/* update magnetic field half time step, ky < 0 */ zt1 = -cimagf(exy[2+3*j+3*k1]) + crealf(exy[2+3*j+3*k1])*_Complex_I; zt2 = -cimagf(exy[1+3*j+3*k1]) + crealf(exy[1+3*j+3*k1])*_Complex_I; zt3 = -cimagf(exy[3*j+3*k1]) + crealf(exy[3*j+3*k1])*_Complex_I; zt4 = bxy[3*j+3*k1] + dth*(dky*zt1); zt5 = bxy[1+3*j+3*k1] + dth*(dkx*zt1); zt6 = bxy[2+3*j+3*k1] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exy[3*j+3*k1] - cdt*(dky*zt1) - afdt*cu[3*j+3*k1]; zt8 = exy[1+3*j+3*k1] - cdt*(dkx*zt1) - afdt*cu[1+3*j+3*k1]; zt9 = exy[2+3*j+3*k1] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+3*j+3*k1]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exy[3*j+3*k1] = zt7; exy[1+3*j+3*k1] = zt8; exy[2+3*j+3*k1] = zt9; ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); zt4 += dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxy[3*j+3*k1] = zt4; bxy[1+3*j+3*k1] = zt5; bxy[2+3*j+3*k1] = zt6; wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); } /* mode numbers kx = 0, nx/2 */ afdt = adt*cimagf(ffc[kk]); /* update magnetic field half time step */ zt1 = -cimagf(exy[2+3*kj]) + crealf(exy[2+3*kj])*_Complex_I; zt3 = -cimagf(exy[3*kj]) + crealf(exy[3*kj])*_Complex_I; zt4 = bxy[3*kj] - dth*(dky*zt1); zt6 = bxy[2+3*kj] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exy[3*kj] + cdt*(dky*zt1) - afdt*cu[3*kj]; zt9 = exy[2+3*kj] - cdt*(dky*zt3) - afdt*cu[2+3*kj]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exy[3*kj] = zt7; 
exy[1+3*kj] = zero; exy[2+3*kj] = zt9; ws += anorm*(zt7*conjf(zt7) + zt9*conjf(zt9)); zt4 -= dth*(dky*zt1); zt6 += dth*(dky*zt3); bxy[3*kj] = zt4; bxy[1+3*kj] = zero; bxy[2+3*kj] = zt6; wp += anorm*(zt4*conjf(zt4) + zt6*conjf(zt6)); bxy[3*k1] = zero; bxy[1+3*k1] = zero; bxy[2+3*k1] = zero; exy[3*k1] = zero; exy[1+3*k1] = zero; exy[2+3*k1] = zero; sum1 += ws; sum2 += wp; } ws = 0.0; wp = 0.0; /* mode numbers ky = 0, ny/2 */ k1 = 3*nxvh*nyh; for (j = 1; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j]); /* update magnetic field half time step */ zt1 = -cimagf(exy[2+3*j]) + crealf(exy[2+3*j])*_Complex_I; zt2 = -cimagf(exy[1+3*j]) + crealf(exy[1+3*j])*_Complex_I; zt5 = bxy[1+3*j] + dth*(dkx*zt1); zt6 = bxy[2+3*j] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt8 = exy[1+3*j] - cdt*(dkx*zt1) - afdt*cu[1+3*j]; zt9 = exy[2+3*j] + cdt*(dkx*zt2) - afdt*cu[2+3*j]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; exy[3*j] = zero; exy[1+3*j] = zt8; exy[2+3*j] = zt9; ws += anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2); bxy[3*j] = zero; bxy[1+3*j] = zt5; bxy[2+3*j] = zt6; wp += anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); bxy[3*j+k1] = zero; bxy[1+3*j+k1] = zero; bxy[2+3*j+k1] = zero; exy[3*j+k1] = zero; exy[1+3*j+k1] = zero; exy[2+3*j+k1] = zero; } bxy[0] = zero; bxy[1] = zero; bxy[2] = zero; exy[0] = zero; exy[1] = zero; exy[2] = zero; bxy[k1] = zero; bxy[1+k1] = zero; bxy[2+k1] = zero; exy[k1] = zero; exy[1+k1] = zero; exy[2+k1] = zero; sum1 += ws; sum2 += wp; *wf = sum1*(float) (nx*ny); *wm = sum2*c2*(float) (nx*ny); return; } /*--------------------------------------------------------------------*/ void cmemfield2(float complex fxy[], float complex exy[], float complex ffc[], int isign, int nx, int ny, int nxvh, int 
nyv, int nxhd, int nyhd) {
/* this subroutine either adds complex vector fields if isign > 0
   or copies complex vector fields if isign < 0
   includes additional smoothing
local data */
   int i, j, k, nxh, nyh, k1, kk, kj;
   float at1;
   nxh = nx/2;
/* nyh = max(1,ny/2): guard against degenerate ny */
   nyh = 1 > ny/2 ? 1 : ny/2;
/* add the fields */
   if (isign > 0) {
#pragma omp parallel for private(i,j,k,k1,kk,kj,at1)
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = nxvh*k;
         k1 = nxvh*ny - kj;
         for (j = 0; j < nxh; j++) {
/* at1 = particle shape factor s: smooths exy as it is accumulated */
            at1 = cimagf(ffc[j+kk]);
            for (i = 0; i < 3; i++) {
               fxy[i+3*j+3*kj] += exy[i+3*j+3*kj]*at1;
               fxy[i+3*j+3*k1] += exy[i+3*j+3*k1]*at1;
            }
         }
      }
/* rows ky = 0 and ky = ny/2 */
      k1 = 3*nxvh*nyh;
      for (j = 0; j < nxh; j++) {
         at1 = cimagf(ffc[j]);
         for (i = 0; i < 3; i++) {
            fxy[i+3*j] += exy[i+3*j]*at1;
            fxy[i+3*j+k1] += exy[i+3*j+k1]*at1;
         }
      }
   }
/* copy the fields */
   else if (isign < 0) {
#pragma omp parallel for private(i,j,k,k1,kk,kj,at1)
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = nxvh*k;
         k1 = nxvh*ny - kj;
         for (j = 0; j < nxh; j++) {
            at1 = cimagf(ffc[j+kk]);
            for (i = 0; i < 3; i++) {
               fxy[i+3*j+3*kj] = exy[i+3*j+3*kj]*at1;
               fxy[i+3*j+3*k1] = exy[i+3*j+3*k1]*at1;
            }
         }
      }
      k1 = 3*nxvh*nyh;
      for (j = 0; j < nxh; j++) {
         at1 = cimagf(ffc[j]);
         for (i = 0; i < 3; i++) {
            fxy[i+3*j] = exy[i+3*j]*at1;
            fxy[i+3*j+k1] = exy[i+3*j+k1]*at1;
         }
      }
   }
/* isign == 0 is a no-op */
   return;
}

/*--------------------------------------------------------------------*/
void cwfft2rinit(int mixup[], float complex sct[], int indx, int indy,
                 int nxhyd, int nxyhd) {
/* this subroutine calculates tables needed by a two dimensional real to
   complex fast fourier transform and its inverse.
   input: indx, indy, nxhyd, nxyhd
   output: mixup, sct
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   nxhyd = maximum of (nx/2,ny)
   nxyhd = one half of maximum of (nx,ny)
   written by viktor k.
decyk, ucla
local data */
   int indx1, indx1y, nx, ny, nxy, nxhy, nxyh;
   int j, k, lb, ll, jb, it;
   float dnxy, arg;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
/* bit-reverse index table: mixup[j] = 1 + reversed bits of j */
   for (j = 0; j < nxhy; j++) {
      lb = j;
      ll = 0;
/* peel the low bit off lb and push it onto ll, indx1y times */
      for (k = 0; k < indx1y; k++) {
         jb = lb/2;
         it = lb - 2*jb;
         lb = jb;
         ll = 2*ll + it;
      }
      mixup[j] = ll + 1;
   }
/* sine/cosine table for the angles 2*n*pi/nxy */
   nxyh = nxy/2;
   dnxy = 6.28318530717959/(float) nxy;
   for (j = 0; j < nxyh; j++) {
      arg = dnxy*(float) j;
/* sct[j] = exp(-sqrt(-1)*2*pi*j/nxy), the inverse-transform twiddle */
      sct[j] = cosf(arg) - sinf(arg)*_Complex_I;
   }
   return;
}

/*--------------------------------------------------------------------*/
void cfft2rmxx(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nyi,
               int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform in x is performed
   f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx))
   if isign = 1, a forward fourier transform in x is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = first dimension of f >= nx/2
   nyd = second dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1] = real, imaginary part of
mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0]) = real part of mode nx/2,0 and imag(f[0][ny/2]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt; int nrx, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nrxb, joff; float ani; float complex t1, t2, t3; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; nyt = nyi + nyp - 1; if (isign > 0) goto L70; /* inverse fourier transform */ nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,joff,ani,t1,t2,t3) for (i = nyi-1; i < nyt; i++) { joff = nxhd*i; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = t1*f[j2+joff]; f[j2+joff] = f[j1+joff] - t2; f[j1+joff] += t2; } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxy/nx; ani = 0.5/(((float) nx)*((float) ny)); for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = ani*(t1 + t2); f[nxh-j+joff] = ani*conjf(t1 - t2); } ani = 2.0*ani; f[nxhh+joff] = ani*conjf(f[nxhh+joff]); f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I); } return; /* forward fourier transform */ L70: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,joff,t1,t2,t3) for (i = nyi-1; i < nyt; i++) { joff = nxhd*i; /* scramble coefficients */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) 
{ t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = t1 + t2; f[nxh-j+joff] = conjf(t1 - t2); } f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]); f[joff] = (crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = conjf(sct[kmr*j]); t2 = t1*f[j2+joff]; f[j2+joff] = f[j1+joff] - t2; f[j1+joff] += t2; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cfft2rmxy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxi, int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the y part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of x, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform in y is performed f[m][n] = sum(f[k][j]*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform in y is performed f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nxi = initial x index used nxp = number of x indices used nxhd = first dimension of f >= nx/2 nyd = second dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are 
stored as follows: f[k][j] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0]) = real part of mode nx/2,0 and imag(f[0][ny/2]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt; int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nryb, koff; float complex t1, t2; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; ny = 1L<<indy; nyh = ny/2; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; nxt = nxi + nxp - 1; if (isign > 0) goto L70; /* inverse fourier transform */ nryb = nxhy/ny; nry = nxy/ny; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2) for (i = nxi-1; i < nxt; i++) { /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { koff = nxhd*k; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1; t1 = f[i+k1]; f[i+k1] = f[i+koff]; f[i+koff] = t1; } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1); j2 = nxhd*(j + k2); t1 = sct[kmr*j]; t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = nxhd*k; k1 = nxhd*ny - koff; t1 = f[k1]; f[k1] = 0.5*(cimagf(f[koff] + t1) + crealf(f[koff] - t1)*_Complex_I); f[koff] = 0.5*(crealf(f[koff] + t1) + cimagf(f[koff] - t1)*_Complex_I); } } return; /* forward fourier transform */ L70: nryb = nxhy/ny; nry = nxy/ny; /* scramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = nxhd*k; k1 = nxhd*ny - koff; t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I; f[k1] = conjf(f[koff] - t1); f[koff] += t1; } } #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2) for (i = 
nxi-1; i < nxt; i++) { /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { koff = nxhd*k; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1; t1 = f[i+k1]; f[i+k1] = f[i+koff]; f[i+koff] = t1; } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1); j2 = nxhd*(j + k2); t1 = conjf(sct[kmr*j]); t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void cfft2rm3x(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nyi, int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the x part of 3 two dimensional real to complex fast fourier transforms, and their inverses, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, two inverse fourier transforms are performed f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2]* exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, two forward fourier transforms are performed f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = second dimension of f >= nx/2 nyd = third dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are stored as follows: f[k][j][0:2] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1][0:2] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, 
and imag(f[0][0][0:2]) = real part of mode nx/2,0 and imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla local data */ int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt; int nrx, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff; int nrxb; float at1, at2, ani; float complex t1, t2, t3, t4; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; nyt = nyi + nyp - 1; if (isign > 0) goto L100; /* inverse fourier transform */ nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,at1,at2,ani,t1,t2,t3 \ ,t4) for (i = nyi-1; i < nyt; i++) { joff = 3*nxhd*i; /* swap complex components */ for (j = 0; j < nxh; j++) { at1 = crealf(f[2+3*j+joff]); f[2+3*j+joff] = crealf(f[1+3*j+joff]) + cimagf(f[2+3*j+joff])*_Complex_I; at2 = cimagf(f[1+3*j+joff]); f[1+3*j+joff] = cimagf(f[3*j+joff]) + at1*_Complex_I; f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[3*j1+joff]; t2 = f[1+3*j1+joff]; t3 = f[2+3*j1+joff]; f[3*j1+joff] = f[3*j+joff]; f[1+3*j1+joff] = f[1+3*j+joff]; f[2+3*j1+joff] = f[2+3*j+joff]; f[3*j+joff] = t1; f[1+3*j+joff] = t2; f[2+3*j+joff] = t3; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = sct[kmr*j]; t2 = t1*f[3*j2+joff]; t3 = t1*f[1+3*j2+joff]; t4 = t1*f[2+3*j2+joff]; f[3*j2+joff] = f[3*j1+joff] - t2; f[1+3*j2+joff] = f[1+3*j1+joff] - t3; f[2+3*j2+joff] = f[2+3*j1+joff] - t4; f[3*j1+joff] += t2; f[1+3*j1+joff] += t3; f[2+3*j1+joff] += t4; } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxy/nx; ani = 0.5/(((float) nx)*((float) ny)); for (j = 1; j < 
nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+3*(nxh-j)+joff]); t1 = f[jj+3*j+joff] + t2; t2 = (f[jj+3*j+joff] - t2)*t3; f[jj+3*j+joff] = ani*(t1 + t2); f[jj+3*(nxh-j)+joff] = ani*conjf(t1 - t2); } } ani = 2.0*ani; for (jj = 0; jj < 3; jj++) { f[jj+3*nxhh+joff] = ani*conjf(f[jj+3*nxhh+joff]); f[jj+joff] = ani*((crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I); } } return; /* forward fourier transform */ L100: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,at1,at2,t1,t2,t3,t4) for (i = nyi-1; i < nyt; i++) { joff = 3*nxhd*i; /* scramble coefficients */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+3*(nxh-j)+joff]); t1 = f[jj+3*j+joff] + t2; t2 = (f[jj+3*j+joff] - t2)*t3; f[jj+3*j+joff] = t1 + t2; f[jj+3*(nxh-j)+joff] = conjf(t1 - t2); } } for (jj = 0; jj < 3; jj++) { f[jj+3*nxhh+joff] = 2.0*conjf(f[jj+3*nxhh+joff]); f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { t1 = f[3*j1+joff]; t2 = f[1+3*j1+joff]; t3 = f[2+3*j1+joff]; f[3*j1+joff] = f[3*j+joff]; f[1+3*j1+joff] = f[1+3*j+joff]; f[2+3*j1+joff] = f[2+3*j+joff]; f[3*j+joff] = t1; f[1+3*j+joff] = t2; f[2+3*j+joff] = t3; } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = j + k1; j2 = j + k2; t1 = conjf(sct[kmr*j]); t2 = t1*f[3*j2+joff]; t3 = t1*f[1+3*j2+joff]; t4 = t1*f[2+3*j2+joff]; f[3*j2+joff] = f[3*j1+joff] - t2; f[1+3*j2+joff] = f[1+3*j1+joff] - t3; f[2+3*j2+joff] = f[2+3*j1+joff] - t4; f[3*j1+joff] += t2; f[1+3*j1+joff] += t3; 
/* finish forward x transform: complete the last butterfly stage */
               f[2+3*j1+joff] += t4;
            }
         }
         ns = ns2;
      }
/* swap complex components */
      for (j = 0; j < nxh; j++) {
         at1 = crealf(f[2+3*j+joff]);
         f[2+3*j+joff] = cimagf(f[1+3*j+joff])
                         + cimagf(f[2+3*j+joff])*_Complex_I;
         at2 = crealf(f[1+3*j+joff]);
         f[1+3*j+joff] = at1 + cimagf(f[3*j+joff])*_Complex_I;
         f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void cfft2rm3y(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxi,
               int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the y part of 3 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   x, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1,  approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, two inverse fourier transforms are performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2] *
         exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, two forward fourier transforms are performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
         exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:2] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:2] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0][0:2]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data                                                            */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, koff;
   int nryb;
   float complex t1, t2, t3, t4;
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
   if (isign > 0)
      goto L80;
/* inverse fourier transform */
   nryb = nxhy/ny;
   nry = nxy/ny;
/* each OpenMP thread handles an independent column range in x */
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3,t4)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 3*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 3*nxhd*k1;
            t1 = f[3*i+k1];
            t2 = f[1+3*i+k1];
            t3 = f[2+3*i+k1];
            f[3*i+k1] = f[3*i+koff];
            f[1+3*i+k1] = f[1+3*i+koff];
            f[2+3*i+k1] = f[2+3*i+koff];
            f[3*i+koff] = t1;
            f[1+3*i+koff] = t2;
            f[2+3*i+koff] = t3;
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 3*nxhd*(j + k1);
               j2 = 3*nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[3*i+j2];
               t3 = t1*f[1+3*i+j2];
               t4 = t1*f[2+3*i+j2];
               f[3*i+j2] = f[3*i+j1] - t2;
               f[1+3*i+j2] = f[1+3*i+j1] - t3;
               f[2+3*i+j2] = f[2+3*i+j1] - t4;
               f[3*i+j1] += t2;
               f[1+3*i+j1] += t3;
               f[2+3*i+j1] += t4;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 (only done by the task owning x=0) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 3*nxhd*k;
         k1 = 3*nxhd*ny - koff;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+k1];
            f[jj+k1] = 0.5*(cimagf(f[jj+koff] + t1)
                          + crealf(f[jj+koff] - t1)*_Complex_I);
            f[jj+koff] = 0.5*(crealf(f[jj+koff] + t1)
                            + cimagf(f[jj+koff] - t1)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L80: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 3*nxhd*k;
         k1 = 3*nxhd*ny - koff;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I;
            f[jj+k1] = conjf(f[jj+koff] - t1);
            f[jj+koff] += t1;
         }
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3,t4)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 3*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 3*nxhd*k1;
            t1 = f[3*i+k1];
            t2 = f[1+3*i+k1];
            t3 = f[2+3*i+k1];
            f[3*i+k1] = f[3*i+koff];
            f[1+3*i+k1] = f[1+3*i+koff];
            f[2+3*i+k1] = f[2+3*i+koff];
            f[3*i+koff] = t1;
            f[1+3*i+koff] = t2;
            f[2+3*i+koff] = t3;
         }
      }
/* then transform in y: forward pass uses conjugated twiddle factors */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 3*nxhd*(j + k1);
               j2 = 3*nxhd*(j + k2);
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[3*i+j2];
               t3 = t1*f[1+3*i+j2];
               t4 = t1*f[2+3*i+j2];
               f[3*i+j2] = f[3*i+j1] - t2;
               f[1+3*i+j2] = f[1+3*i+j1] - t3;
               f[2+3*i+j2] = f[2+3*i+j1] - t4;
               f[3*i+j1] += t2;
               f[1+3*i+j1] += t3;
               f[2+3*i+j1] += t4;
            }
         }
         ns = ns2;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void cwfft2rmx(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxhd,
               int nyd, int nxhyd, int nxyhd) {
/* wrapper function for real to complex fft, with packed data */
/* parallelized with OpenMP */
/* local data */
   int nxh, ny;
   static int nxi = 1, nyi = 1;
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   ny = 1L<<indy;
/* inverse fourier transform: x pass first, then y pass */
   if (isign < 0) {
/* perform x fft */
      cfft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
/* perform y fft */
      cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
   }
/* forward fourier transform: passes applied in the reverse order */
   else if (isign > 0) {
/* perform y fft */
      cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
/* perform x fft */
      cfft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
   }
   return;
}

/*--------------------------------------------------------------------*/
void
cwfft2rm3(float complex f[], int isign, int mixup[],
          float complex sct[], int indx, int indy, int nxhd, int nyd,
          int nxhyd, int nxyhd) {
/* wrapper function for 3 2d real to complex ffts */
/* local data */
   int nxh, ny;
   static int nxi = 1, nyi = 1;
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   ny = 1L<<indy;
/* inverse fourier transform: x pass first, then y pass */
   if (isign < 0) {
/* perform x fft */
      cfft2rm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
/* perform y fft */
      cfft2rm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
   }
/* forward fourier transform: passes applied in the reverse order */
   else if (isign > 0) {
/* perform y fft */
      cfft2rm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
/* perform x fft */
      cfft2rm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
   }
   return;
}

/* Interfaces to Fortran */
/* Each wrapper below dereferences the Fortran pass-by-reference scalar
   arguments and forwards to the corresponding C routine; array and
   output arguments are passed through unchanged.                     */

/*--------------------------------------------------------------------*/
void cdistr2h_(float *part, float *vtx, float *vty, float *vtz,
               float *vdx, float *vdy, float *vdz, int *npx, int *npy,
               int *idimp, int *nop, int *nx, int *ny, int *ipbc) {
   cdistr2h(part,*vtx,*vty,*vtz,*vdx,*vdy,*vdz,*npx,*npy,*idimp,*nop,
            *nx,*ny,*ipbc);
   return;
}

/*--------------------------------------------------------------------*/
void cdblkp2l_(float *part, int *kpic, int *nppmx, int *idimp,
               int *nop, int *mx, int *my, int *mx1, int *mxy1,
               int *irc) {
   cdblkp2l(part,kpic,nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,irc);
   return;
}

/*--------------------------------------------------------------------*/
void cppmovin2l_(float *part, float *ppart, int *kpic, int *nppmx,
                 int *idimp, int *nop, int *mx, int *my, int *mx1,
                 int *mxy1, int *irc) {
   cppmovin2l(part,ppart,kpic,*nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,
              irc);
   return;
}

/*--------------------------------------------------------------------*/
void cppcheck2l_(float *ppart, int *kpic, int *idimp, int *nppmx,
                 int *nx, int *ny, int *mx, int *my, int *mx1,
                 int *my1, int *irc) {
   cppcheck2l(ppart,kpic,*idimp,*nppmx,*nx,*ny,*mx,*my,*mx1,*my1,irc);
   return;
}

/*--------------------------------------------------------------------*/
void cgbppush23l_(float *ppart, float *fxy, float *bxy, int *kpic,
                  float *qbm, float *dt, float *dtc, float *ek,
                  int *idimp, int *nppmx, int *nx, int *ny, int *mx,
                  int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
                  int *ipbc) {
   cgbppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,*nx,
               *ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
   return;
}

/*--------------------------------------------------------------------*/
void cgbppushf23l_(float *ppart, float *fxy, float *bxy, int *kpic,
                   int *ncl, int *ihole, float *qbm, float *dt,
                   float *dtc, float *ek, int *idimp, int *nppmx,
                   int *nx, int *ny, int *mx, int *my, int *nxv,
                   int *nyv, int *mx1, int *mxy1, int *ntmax,
                   int *irc) {
   cgbppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,*idimp,
                *nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,
                irc);
   return;
}

/*--------------------------------------------------------------------*/
void cgrbppush23l_(float *ppart, float *fxy, float *bxy, int *kpic,
                   float *qbm, float *dt, float *dtc, float *ci,
                   float *ek, int *idimp, int *nppmx, int *nx,
                   int *ny, int *mx, int *my, int *nxv, int *nyv,
                   int *mx1, int *mxy1, int *ipbc) {
   cgrbppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,*nppmx,
                *nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
   return;
}

/*--------------------------------------------------------------------*/
void cgrbppushf23l_(float *ppart, float *fxy, float *bxy, int *kpic,
                    int *ncl, int *ihole, float *qbm, float *dt,
                    float *dtc, float *ci, float *ek, int *idimp,
                    int *nppmx, int *nx, int *ny, int *mx, int *my,
                    int *nxv, int *nyv, int *mx1, int *mxy1,
                    int *ntmax, int *irc) {
   cgrbppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
                 *idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,
                 *ntmax,irc);
   return;
}

/*--------------------------------------------------------------------*/
void cgppost2l_(float *ppart, float *q, int *kpic, float *qm,
                int *nppmx, int *idimp, int *mx, int *my, int *nxv,
                int *nyv, int *mx1, int *mxy1) {
   cgppost2l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
             *mxy1);
   return;
}

/*--------------------------------------------------------------------*/
void cgjppost2l_(float *ppart, float *cu, int *kpic, float *qm,
                 float *dt, int *nppmx, int *idimp, int *nx, int *ny,
                 int *mx, int *my, int *nxv, int *nyv, int *mx1,
                 int *mxy1, int *ipbc) {
   cgjppost2l(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,*my,*nxv,
              *nyv,*mx1,*mxy1,*ipbc);
   return;
}

/*--------------------------------------------------------------------*/
void cgjppostf2l_(float *ppart, float *cu, int *kpic, int *ncl,
                  int *ihole, float *qm, float *dt, int *nppmx,
                  int *idimp, int *nx, int *ny, int *mx, int *my,
                  int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax,
                  int *irc) {
   cgjppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,*ny,
               *mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
   return;
}

/*--------------------------------------------------------------------*/
void cgrjppost2l_(float *ppart, float *cu, int *kpic, float *qm,
                  float *dt, float *ci, int *nppmx, int *idimp,
                  int *nx, int *ny, int *mx, int *my, int *nxv,
                  int *nyv, int *mx1, int *mxy1, int *ipbc) {
   cgrjppost2l(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*mx,*my,
               *nxv,*nyv,*mx1,*mxy1,*ipbc);
   return;
}

/*--------------------------------------------------------------------*/
void cgrjppostf2l_(float *ppart, float *cu, int *kpic, int *ncl,
                   int *ihole, float *qm, float *dt, float *ci,
                   int *nppmx, int *idimp, int *nx, int *ny, int *mx,
                   int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
                   int *ntmax, int *irc) {
   cgrjppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,*nx,
                *ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
   return;
}

/*--------------------------------------------------------------------*/
void cpporder2l_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                 int *ihole, int *idimp, int *nppmx, int *nx, int *ny,
                 int *mx, int *my, int *mx1, int *my1, int *npbmx,
                 int *ntmax, int *irc) {
   cpporder2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,*my,
              *mx1,*my1,*npbmx,*ntmax,irc);
   return;
}

/*--------------------------------------------------------------------*/
void cpporderf2l_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                  int *ihole, int *idimp, int *nppmx, int *mx1,
                  int *my1, int *npbmx, int *ntmax, int *irc) {
   cpporderf2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
               *npbmx,*ntmax,irc);
   return;
}

/*--------------------------------------------------------------------*/
void cbguard2l_(float *bxy, int *nx, int *ny, int *nxe, int *nye) {
   cbguard2l(bxy,*nx,*ny,*nxe,*nye);
   return;
}

/*--------------------------------------------------------------------*/
void cacguard2l_(float *cu, int *nx, int *ny, int *nxe, int *nye) {
   cacguard2l(cu,*nx,*ny,*nxe,*nye);
   return;
}

/*--------------------------------------------------------------------*/
void caguard2l_(float *q, int *nx, int *ny, int *nxe, int *nye) {
   caguard2l(q,*nx,*ny,*nxe,*nye);
   return;
}

/*--------------------------------------------------------------------*/
void cmpois23_(float complex *q, float complex *fxy, int *isign,
               float complex *ffc, float *ax, float *ay, float *affp,
               float *we, int *nx, int *ny, int *nxvh, int *nyv,
               int *nxhd, int *nyhd) {
   cmpois23(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*nxvh,*nyv,*nxhd,
            *nyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cmcuperp2_(float complex *cu, int *nx, int *ny, int *nxvh,
                int *nyv) {
   cmcuperp2(cu,*nx,*ny,*nxvh,*nyv);
   return;
}

/*--------------------------------------------------------------------*/
void cmibpois23_(float complex *cu, float complex *bxy,
                 float complex *ffc, float *ci, float *wm, int *nx,
                 int *ny, int *nxvh, int *nyv, int *nxhd, int *nyhd) {
   cmibpois23(cu,bxy,ffc,*ci,wm,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cmmaxwel2_(float complex *exy, float complex *bxy,
                float complex *cu, float complex *ffc, float *ci,
                float *dt, float *wf, float *wm, int *nx, int *ny,
                int *nxvh, int *nyv, int *nxhd, int *nyhd) {
   cmmaxwel2(exy,bxy,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nxvh,*nyv,*nxhd,
             *nyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cmemfield2_(float complex *fxy, float complex *exy,
                 float complex *ffc, int *isign, int *nx, int *ny,
                 int *nxvh, int *nyv, int *nxhd, int *nyhd) {
   cmemfield2(fxy,exy,ffc,*isign,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cwfft2rinit_(int *mixup, float complex *sct, int *indx, int *indy,
                  int *nxhyd, int *nxyhd) {
   cwfft2rinit(mixup,sct,*indx,*indy,*nxhyd,*nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cfft2rmxx_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nyi,
                int *nyp, int *nxhd, int *nyd, int *nxhyd,
                int *nxyhd) {
   cfft2rmxx(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cfft2rmxy_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxi,
                int *nxp, int *nxhd, int *nyd, int *nxhyd,
                int *nxyhd) {
   cfft2rmxy(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cfft2rm3x_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nyi,
                int *nyp, int *nxhd, int *nyd, int *nxhyd,
                int *nxyhd) {
   cfft2rm3x(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cfft2rm3y_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxi,
                int *nxp, int *nxhd, int *nyd, int *nxhyd,
                int *nxyhd) {
   cfft2rm3y(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cwfft2rmx_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxhd,
                int *nyd, int *nxhyd, int *nxyhd) {
   cwfft2rmx(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
   return;
}

/*--------------------------------------------------------------------*/
void cwfft2rm3_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxhd,
                int *nyd, int *nxhyd, int *nxyhd) {
   cwfft2rm3(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
   return;
}
GB_binop__rdiv_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): this file appears auto-generated; any fixes belong in the
// generator/template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__rdiv_uint16
// A.*B function (eWiseMult):       GB_AemultB__rdiv_uint16
// A*D function (colscale):         GB_AxD__rdiv_uint16
// D*A function (rowscale):         GB_DxB__rdiv_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__rdiv_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__rdiv_uint16
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__rdiv_uint16
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__rdiv_uint16
// C=scalar+B                       GB_bind1st__rdiv_uint16
// C=scalar+B'                      GB_bind1st_tran__rdiv_uint16
// C=A+scalar                       GB_bind2nd__rdiv_uint16
// C=A'+scalar                      GB_bind2nd_tran__rdiv_uint16

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 16)
// (rdiv is "reverse divide": z = y/x, hence the swapped operands)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_IDIV_UNSIGNED (y, x, 16) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_UINT16 || GxB_NO_RDIV_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB_Cdense_ewise3_accum__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__rdiv_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, present in the generated
    // source; harmless, but the template/generator emits it twice.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__rdiv_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t bij = Bx [p] ;
        // rdiv: z = y/x, so the bound scalar x is the divisor here
        Cx [p] = GB_IDIV_UNSIGNED (bij, x, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__rdiv_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        Cx [p] = GB_IDIV_UNSIGNED (y, aij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    uint16_t aij = Ax [pA] ;      \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 16) ;        \
}

GrB_Info GB_bind1st_tran__rdiv_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    uint16_t aij = Ax [pA] ;      \
    Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 16) ;        \
}

GrB_Info GB_bind2nd_tran__rdiv_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nonneg_frob.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include "../admm.h"

/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/**
* @brief The proximal update for a non-negative factorization. This routine
*        projects 'primal' onto the non-negative orthant while adding a Frob.
*        norm regularizer. This scales primal by rho*inv((lambda+rho) * eye),
*        or more simply multiplies each entry by (rho/(lambda+rho)). It then
*        projects to the non-negative orthant.
*
* @param[out] primal The row-major matrix to update.
* @param nrows The number of rows in primal.
* @param ncols The number of columns in primal.
* @param offset Not used.
* @param data Pointer to a single val_t holding the regularization strength
*             lambda (allocated and stored by splatt_register_ntf_frob()).
* @param rho The ADMM penalty parameter; determines the shrinkage factor
*            rho/(lambda+rho) applied to each entry.
* @param should_parallelize If true, parallelize.
*/
void nonneg_frob_prox(
    val_t * primal,
    idx_t const nrows,
    idx_t const ncols,
    idx_t const offset,
    void * data,
    val_t const rho,
    bool const should_parallelize)
{
  /* lambda is the Frobenius regularization weight registered with this
     constraint */
  val_t const lambda = *((val_t *) data);
  val_t const mult = rho / (lambda + rho);

  #pragma omp parallel for if(should_parallelize)
  for(idx_t i=0; i < nrows; ++i) {
    for(idx_t j=0; j < ncols; ++j) {
      idx_t const index = j + (i*ncols);
      /* shrink toward zero, then clamp negatives to zero (projection onto
         the non-negative orthant) */
      val_t const new_val = primal[index] * mult;
      primal[index] = (new_val > 0.) ? new_val : 0.;
    }
  }
}

/**
* @brief Free the single val_t allocated for Frobenius regularization.
*
* @param data The data to free.
*/
void nonneg_frob_free(
    void * data)
{
  splatt_free(data);
}

/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

/**
* @brief Register a non-negativity constraint with Frobenius-norm
*        regularization (weight 'multiplier') on each mode listed in
*        'modes_included'. Ownership of the allocated multiplier copy is
*        transferred to the constraint and released by nonneg_frob_free().
*/
splatt_error_type splatt_register_ntf_frob(
    splatt_cpd_opts * opts,
    splatt_val_t const multiplier,
    splatt_idx_t const * const modes_included,
    splatt_idx_t const num_modes)
{
  for(idx_t m = 0; m < num_modes; ++m) {
    idx_t const mode = modes_included[m];

    splatt_cpd_constraint * ntf_con = splatt_alloc_constraint(SPLATT_CON_ADMM);

    ntf_con->prox_func = nonneg_frob_prox;
    ntf_con->free_func = nonneg_frob_free;

    /* set hints to assist optimizations */
    /* NOTE(review): 'sparsity_inducing' presumably refers to the zeros
       introduced by the non-negative projection, not the Frobenius term --
       confirm against the constraint framework's interpretation. */
    ntf_con->hints.row_separable = true;
    ntf_con->hints.sparsity_inducing = true;

    /* NOTE(review): label says "L1-REG" but the prox implements a Frobenius
       (L2) regularizer; also an unbounded sprintf -- verify the size of
       'description' can always hold this string. */
    sprintf(ntf_con->description, "NTF-L1-REG (%0.1e)", multiplier);

    /* store multiplier */
    val_t * mult = splatt_malloc(sizeof(*mult));
    *mult = multiplier;
    ntf_con->data = mult;

    /* add to the CPD factorization */
    splatt_register_constraint(opts, mode, ntf_con);

    /* memory will be freed by splatt_free_constraint() */
  }

  return SPLATT_SUCCESS;
}
convolutiondepthwise_5x5.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise convolution, 5x5 kernel, stride 1.
// Each input channel g is convolved with its own 5x5 kernel (_kernel + g*25) and
// the per-channel bias is added (group == bottom_blob.c, one OpenMP task per channel).
// The main loop produces two output rows per iteration; inner loops use hand-written
// NEON inline assembly (separate aarch64 / armv7 paths) with intrinsic/scalar tail code.
// Assumes valid-padding geometry: the caller sizes top_blob so that reading rows
// r0..r5 of bottom_blob stays in bounds — TODO confirm against calling layer.
static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const float bias0 = bias ? bias[g] : 0.f;

        const float* kernel0 = kernel + g * 25;

        float* outptr = out;
        float* outptr2 = outptr + outw;

        const float* img0 = bottom_blob.channel(g);

        // six consecutive input rows feed two output rows
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;
        const float* r3 = img0 + w * 3;
        const float* r4 = img0 + w * 4;
        const float* r5 = img0 + w * 5;

        // the five kernel rows
        const float* k0 = kernel0;
        const float* k1 = kernel0 + 5;
        const float* k2 = kernel0 + 10;
        const float* k3 = kernel0 + 15;
        const float* k4 = kernel0 + 20;

#if __ARM_NEON
        // kernel taps preloaded into vector registers; overlapping 4-lane views
        // of the 25 weights (e.g. _k4567 = taps 4..7) plus a splat of tap 24
        float32x4_t _k0123 = vld1q_f32(kernel0);
        float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
        float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
        float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
        float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
        float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
        float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);

        float32x4_t _bias0 = vdupq_n_f32(bias0);
#endif // __ARM_NEON

        // main loop: produce two output rows per iteration
        int i = 0;
        for (; i + 1 < outh; i += 2)
        {
#if __ARM_NEON
#if __aarch64__
            int nn = outw >> 3;
            int remain = outw & 7;
#else
            int nn = outw >> 2;
            int remain = outw & 3;
#endif // __aarch64__
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    // r1
                    "prfm pldl1keep, [%4, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18
                    "mov v8.16b, %25.16b \n" // v8 = _bias0
                    "mov v9.16b, %25.16b \n" // v9 = _bias0
                    "0: \n"
                    "mov v10.16b, %25.16b \n" // v10 = _bias0
                    "mov v11.16b, %25.16b \n" // v11 = _bias0
                    "fmla v8.4s, v16.4s, %19.s[1] \n"
                    "fmla v10.4s, v16.4s, %18.s[0] \n"
                    "ext v19.16b, v16.16b, v17.16b, #4 \n" // r11
                    "fmla v9.4s, v17.4s, %19.s[1] \n"
                    "fmla v11.4s, v17.4s, %18.s[0] \n"
                    "ext v20.16b, v17.16b, v18.16b, #4 \n" // r15
                    "fmla v8.4s, v17.4s, %20.s[1] \n"
                    "fmla v10.4s, v17.4s, %19.s[0] \n"
                    "ext v21.16b, v16.16b, v17.16b, #8 \n" // r12
                    "fmla v9.4s, v18.4s, %20.s[1] \n"
                    "fmla v11.4s, v18.4s, %19.s[0] \n"
                    "ext v22.16b, v17.16b, v18.16b, #8 \n" // r16
                    "fmla v8.4s, v19.4s, %19.s[2] \n"
                    "fmla v10.4s, v19.4s, %18.s[1] \n"
                    "ext v19.16b, v16.16b, v17.16b, #12 \n" // r13
                    "fmla v9.4s, v20.4s, %19.s[2] \n"
                    "fmla v11.4s, v20.4s, %18.s[1] \n"
                    "ext v20.16b, v17.16b, v18.16b, #12 \n" // r17
                    "fmla v8.4s, v21.4s, %19.s[3] \n"
                    "fmla v10.4s, v21.4s, %18.s[2] \n"
                    "add %4, %4, #32 \n"
                    "fmla v9.4s, v22.4s, %19.s[3] \n"
                    "fmla v11.4s, v22.4s, %18.s[2] \n"
                    // r2
                    "prfm pldl1keep, [%5, #384] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 = r20 r24 r28
                    "fmla v8.4s, v19.4s, %20.s[0] \n"
                    "fmla v10.4s, v19.4s, %18.s[3] \n"
                    "fmla v9.4s, v20.4s, %20.s[0] \n"
                    "fmla v11.4s, v20.4s, %18.s[3] \n"
                    "add %5, %5, #32 \n"
                    "fmla v8.4s, v12.4s, %20.s[2] \n"
                    "fmla v10.4s, v12.4s, %19.s[1] \n"
                    "ext v21.16b, v12.16b, v13.16b, #4 \n" // r21
                    "fmla v9.4s, v13.4s, %20.s[2] \n"
                    "fmla v11.4s, v13.4s, %19.s[1] \n"
                    "ext v22.16b, v13.16b, v14.16b, #4 \n" // r25
                    "fmla v8.4s, v13.4s, %21.s[2] \n"
                    "fmla v10.4s, v13.4s, %20.s[1] \n"
                    "ext v19.16b, v12.16b, v13.16b, #8 \n" // r22
                    "fmla v9.4s, v14.4s, %21.s[2] \n"
                    "fmla v11.4s, v14.4s, %20.s[1] \n"
                    "ext v20.16b, v13.16b, v14.16b, #8 \n" // r26
                    "fmla v8.4s, v21.4s, %20.s[3] \n"
                    "fmla v10.4s, v21.4s, %19.s[2] \n"
                    "ext v21.16b, v12.16b, v13.16b, #12 \n" // r23
                    "fmla v9.4s, v22.4s, %20.s[3] \n"
                    "fmla v11.4s, v22.4s, %19.s[2] \n"
                    "ext v22.16b, v13.16b, v14.16b, #12 \n" // r27
                    "fmla v8.4s, v19.4s, %21.s[0] \n"
                    "fmla v10.4s, v19.4s, %19.s[3] \n"
                    "fmla v9.4s, v20.4s, %21.s[0] \n"
                    "fmla v11.4s, v20.4s, %19.s[3] \n"
                    // r3
                    "prfm pldl1keep, [%6, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r30 r34 r38
                    "fmla v8.4s, v21.4s, %21.s[1] \n"
                    "fmla v10.4s, v21.4s, %20.s[0] \n"
                    "fmla v9.4s, v22.4s, %21.s[1] \n"
                    "fmla v11.4s, v22.4s, %20.s[0] \n"
                    "add %6, %6, #32 \n"
                    "fmla v8.4s, v16.4s, %21.s[3] \n"
                    "fmla v10.4s, v16.4s, %20.s[2] \n"
                    "ext v19.16b, v16.16b, v17.16b, #4 \n" // r31
                    "fmla v9.4s, v17.4s, %21.s[3] \n"
                    "fmla v11.4s, v17.4s, %20.s[2] \n"
                    "ext v20.16b, v17.16b, v18.16b, #4 \n" // r35
                    "fmla v8.4s, v17.4s, %22.s[3] \n"
                    "fmla v10.4s, v17.4s, %21.s[2] \n"
                    "ext v21.16b, v16.16b, v17.16b, #8 \n" // r32
                    "fmla v9.4s, v18.4s, %22.s[3] \n"
                    "fmla v11.4s, v18.4s, %21.s[2] \n"
                    "ext v22.16b, v17.16b, v18.16b, #8 \n" // r36
                    "fmla v8.4s, v19.4s, %22.s[0] \n"
                    "fmla v10.4s, v19.4s, %20.s[3] \n"
                    "ext v19.16b, v16.16b, v17.16b, #12 \n" // r33
                    "fmla v9.4s, v20.4s, %22.s[0] \n"
                    "fmla v11.4s, v20.4s, %20.s[3] \n"
                    "ext v20.16b, v17.16b, v18.16b, #12 \n" // r37
                    "fmla v8.4s, v21.4s, %22.s[1] \n"
                    "fmla v10.4s, v21.4s, %21.s[0] \n"
                    "fmla v9.4s, v22.4s, %22.s[1] \n"
                    "fmla v11.4s, v22.4s, %21.s[0] \n"
                    // r4
                    "prfm pldl1keep, [%7, #384] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n" // v12 v13 v14 = r40 r44 r48
                    "fmla v8.4s, v19.4s, %22.s[2] \n"
                    "fmla v10.4s, v19.4s, %21.s[1] \n"
                    "add %7, %7, #32 \n"
                    "fmla v9.4s, v20.4s, %22.s[2] \n"
                    "fmla v11.4s, v20.4s, %21.s[1] \n"
                    "ext v21.16b, v12.16b, v13.16b, #4 \n" // r41
                    "fmla v8.4s, v12.4s, %23.s[0] \n"
                    "fmla v10.4s, v12.4s, %21.s[3] \n"
                    "ext v22.16b, v13.16b, v14.16b, #4 \n" // r45
                    "fmla v9.4s, v13.4s, %23.s[0] \n"
                    "fmla v11.4s, v13.4s, %21.s[3] \n"
                    "ext v19.16b, v12.16b, v13.16b, #8 \n" // r42
                    "fmla v8.4s, v13.4s, %24.s[0] \n"
                    "fmla v10.4s, v13.4s, %22.s[3] \n"
                    "ext v20.16b, v13.16b, v14.16b, #8 \n" // r46
                    "fmla v9.4s, v14.4s, %24.s[0] \n"
                    "fmla v11.4s, v14.4s, %22.s[3] \n"
                    // r0 and r5
                    "prfm pldl1keep, [%3, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n" // v16 v17 v18 = r00 r04 r08
                    "fmla v8.4s, v21.4s, %23.s[1] \n"
                    "fmla v10.4s, v21.4s, %22.s[0] \n"
                    "ext v21.16b, v12.16b, v13.16b, #12 \n" // r43
                    "fmla v9.4s, v22.4s, %23.s[1] \n"
                    "fmla v11.4s, v22.4s, %22.s[0] \n"
                    "ext v22.16b, v13.16b, v14.16b, #12 \n" // r47
                    "fmla v8.4s, v19.4s, %23.s[2] \n"
                    "fmla v10.4s, v19.4s, %22.s[1] \n"
                    "prfm pldl1keep, [%8, #384] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n" // v12 v13 v14 = r50 r54 r58
                    "fmla v9.4s, v20.4s, %23.s[2] \n"
                    "fmla v11.4s, v20.4s, %22.s[1] \n"
                    "ext v19.16b, v16.16b, v17.16b, #4 \n" // r01
                    "fmla v8.4s, v21.4s, %23.s[3] \n"
                    "fmla v10.4s, v21.4s, %22.s[2] \n"
                    "ext v23.16b, v12.16b, v13.16b, #4 \n" // r51
                    "fmla v9.4s, v22.4s, %23.s[3] \n"
                    "fmla v11.4s, v22.4s, %22.s[2] \n"
                    "ext v20.16b, v17.16b, v18.16b, #4 \n" // r05
                    "fmla v8.4s, v16.4s, %18.s[0] \n"
                    "fmla v10.4s, v12.4s, %23.s[0] \n"
                    "ext v24.16b, v13.16b, v14.16b, #4 \n" // r55
                    "fmla v9.4s, v17.4s, %18.s[0] \n"
                    "fmla v11.4s, v13.4s, %23.s[0] \n"
                    "ext v21.16b, v16.16b, v17.16b, #8 \n" // r02
                    "fmla v8.4s, v17.4s, %19.s[0] \n"
                    "fmla v10.4s, v13.4s, %24.s[0] \n"
                    "ext v25.16b, v12.16b, v13.16b, #8 \n" // r52
                    "fmla v9.4s, v18.4s, %19.s[0] \n"
                    "fmla v11.4s, v14.4s, %24.s[0] \n"
                    "ext v22.16b, v17.16b, v18.16b, #8 \n" // r06
                    "fmla v8.4s, v19.4s, %18.s[1] \n"
                    "fmla v10.4s, v23.4s, %23.s[1] \n"
                    "ext v26.16b, v13.16b, v14.16b, #8 \n" // r56
                    "fmla v9.4s, v20.4s, %18.s[1] \n"
                    "fmla v11.4s, v24.4s, %23.s[1] \n"
                    "ext v19.16b, v16.16b, v17.16b, #12 \n" // r03
                    "fmla v8.4s, v21.4s, %18.s[2] \n"
                    "fmla v10.4s, v25.4s, %23.s[2] \n"
                    "ext v23.16b, v12.16b, v13.16b, #12 \n" // r53
                    "fmla v9.4s, v22.4s, %18.s[2] \n"
                    "fmla v11.4s, v26.4s, %23.s[2] \n"
                    "ext v20.16b, v17.16b, v18.16b, #12 \n" // r07
                    "fmla v8.4s, v19.4s, %18.s[3] \n"
                    "fmla v10.4s, v23.4s, %23.s[3] \n"
                    "ext v24.16b, v13.16b, v14.16b, #12 \n" // r57
                    "fmla v9.4s, v20.4s, %18.s[3] \n"
                    "add %3, %3, #32 \n"
                    "fmla v11.4s, v24.4s, %23.s[3] \n"
                    "add %8, %8, #32 \n"
                    // r1
                    "prfm pldl1keep, [%4, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18
                    "subs %w0, %w0, #1 \n"
                    "st1 {v8.4s, v9.4s}, [%1], #32 \n"
                    "mov v8.16b, %25.16b \n" // v8 = _bias0
                    "mov v9.16b, %25.16b \n" // v9 = _bias0
                    "st1 {v10.4s, v11.4s}, [%2], #32 \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(outptr2), // %2
                    "=r"(r0), // %3
                    "=r"(r1), // %4
                    "=r"(r2), // %5
                    "=r"(r3), // %6
                    "=r"(r4), // %7
                    "=r"(r5) // %8
                    : "0"(nn),
                    "1"(outptr),
                    "2"(outptr2),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "7"(r4),
                    "8"(r5),
                    "w"(_k0123), // %18
                    "w"(_k4567), // %19
                    "w"(_k891011), // %20
                    "w"(_k12131415), // %21
                    "w"(_k16171819), // %22
                    "w"(_k20212223), // %23
                    "w"(_k24242424), // %24
                    "w"(_bias0) // %25
                    : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26");
            }

            // 4-wide tail for the two-row path
            if (remain >= 4)
            {
                remain -= 4;

                asm volatile(
                    // r1
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v12.4s, v13.4s}, [%3] \n" // v12 v13 = r10 r14
                    "mov v8.16b, %23.16b \n" // v8 = _bias0
                    "mov v9.16b, %23.16b \n" // v9 = _bias0
                    "fmul v10.4s, v12.4s, %17.s[1] \n"
                    "fmul v11.4s, v12.4s, %16.s[0] \n"
                    "ext v21.16b, v12.16b, v13.16b, #4 \n" // r11
                    "fmla v8.4s, v13.4s, %18.s[1] \n"
                    "fmla v9.4s, v13.4s, %17.s[0] \n"
                    "ext v22.16b, v12.16b, v13.16b, #8 \n" // r12
                    "fmla v10.4s, v21.4s, %17.s[2] \n"
                    "fmla v11.4s, v21.4s, %16.s[1] \n"
                    "ext v23.16b, v12.16b, v13.16b, #12 \n" // r13
                    "fmla v8.4s, v22.4s, %17.s[3] \n"
                    "fmla v9.4s, v22.4s, %16.s[2] \n"
                    // r2
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v16.4s, v17.4s}, [%4] \n" // v16 v17 = r20 r24
                    "fmla v10.4s, v23.4s, %18.s[0] \n"
                    "fmla v11.4s, v23.4s, %16.s[3] \n"
                    "add %4, %4, #16 \n"
                    "fmla v8.4s, v16.4s, %18.s[2] \n"
                    "fmla v9.4s, v16.4s, %17.s[1] \n"
                    "ext v18.16b, v16.16b, v17.16b, #4 \n" // r21
                    "fmla v10.4s, v17.4s, %19.s[2] \n"
                    "fmla v11.4s, v17.4s, %18.s[1] \n"
                    "ext v19.16b, v16.16b, v17.16b, #8 \n" // r22
                    "fmla v8.4s, v18.4s, %18.s[3] \n"
                    "fmla v9.4s, v18.4s, %17.s[2] \n"
                    "ext v20.16b, v16.16b, v17.16b, #12 \n" // r23
                    "fmla v10.4s, v19.4s, %19.s[0] \n"
                    "fmla v11.4s, v19.4s, %17.s[3] \n"
                    // r3
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld1 {v12.4s, v13.4s}, [%5] \n" // v12 v13 = r30 r34
                    "fmla v8.4s, v20.4s, %19.s[1] \n"
                    "fmla v9.4s, v20.4s, %18.s[0] \n"
                    "add %5, %5, #16 \n"
                    "fmla v10.4s, v12.4s, %19.s[3] \n"
                    "fmla v11.4s, v12.4s, %18.s[2] \n"
                    "ext v21.16b, v12.16b, v13.16b, #4 \n" // r31
                    "fmla v8.4s, v13.4s, %20.s[3] \n"
                    "fmla v9.4s, v13.4s, %19.s[2] \n"
                    "ext v22.16b, v12.16b, v13.16b, #8 \n" // r32
                    "fmla v10.4s, v21.4s, %20.s[0] \n"
                    "fmla v11.4s, v21.4s, %18.s[3] \n"
                    "ext v23.16b, v12.16b, v13.16b, #12 \n" // r33
                    "fmla v8.4s, v22.4s, %20.s[1] \n"
                    "fmla v9.4s, v22.4s, %19.s[0] \n"
                    // r4
                    "prfm pldl1keep, [%6, #256] \n"
                    "ld1 {v16.4s, v17.4s}, [%6] \n" // v16 v17 = r40 r44
                    "fmla v10.4s, v23.4s, %20.s[2] \n"
                    "fmla v11.4s, v23.4s, %19.s[1] \n"
                    "add %6, %6, #16 \n"
                    "fmla v8.4s, v16.4s, %21.s[0] \n"
                    "fmla v9.4s, v16.4s, %19.s[3] \n"
                    "ext v18.16b, v16.16b, v17.16b, #4 \n" // r41
                    "fmla v10.4s, v17.4s, %22.s[0] \n"
                    "fmla v11.4s, v17.4s, %20.s[3] \n"
                    "ext v19.16b, v16.16b, v17.16b, #8 \n" // r42
                    "fmla v8.4s, v18.4s, %21.s[1] \n"
                    "fmla v9.4s, v18.4s, %20.s[0] \n"
                    "ext v20.16b, v16.16b, v17.16b, #12 \n" // r43
                    "fmla v10.4s, v19.4s, %21.s[2] \n"
                    "fmla v11.4s, v19.4s, %20.s[1] \n"
                    // r0
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v16.4s, v17.4s}, [%2] \n" // v16 v17 = r00 r04
                    "fmla v8.4s, v20.4s, %21.s[3] \n"
                    "fmla v9.4s, v20.4s, %20.s[2] \n"
                    // r5
                    "prfm pldl1keep, [%7, #256] \n"
                    "ld1 {v12.4s, v13.4s}, [%7] \n" // v12 v13 = r50 r54
                    "fmla v10.4s, v16.4s, %16.s[0] \n"
                    "fmla v11.4s, v12.4s, %21.s[0] \n"
                    "ext v18.16b, v16.16b, v17.16b, #4 \n" // r01
                    "fmla v8.4s, v17.4s, %17.s[0] \n"
                    "ext v21.16b, v12.16b, v13.16b, #4 \n" // r51
                    "fmla v9.4s, v13.4s, %22.s[0] \n"
                    "ext v19.16b, v16.16b, v17.16b, #8 \n" // r02
                    "fmla v10.4s, v18.4s, %16.s[1] \n"
                    "ext v22.16b, v12.16b, v13.16b, #8 \n" // r52
                    "fmla v11.4s, v21.4s, %21.s[1] \n"
                    "ext v20.16b, v16.16b, v17.16b, #12 \n" // r03
                    "fmla v8.4s, v19.4s, %16.s[2] \n"
                    "ext v23.16b, v12.16b, v13.16b, #12 \n" // r53
                    "fmla v9.4s, v22.4s, %21.s[2] \n"
                    "add %3, %3, #16 \n"
                    "fmla v10.4s, v20.4s, %16.s[3] \n"
                    "fmla v11.4s, v23.4s, %21.s[3] \n"
                    "add %2, %2, #16 \n"
                    "fadd v8.4s, v8.4s, v10.4s \n"
                    "fadd v9.4s, v9.4s, v11.4s \n"
                    "add %7, %7, #16 \n"
                    "st1 {v8.4s}, [%0], #16 \n"
                    "st1 {v9.4s}, [%1], #16 \n"
                    : "=r"(outptr), // %0
                    "=r"(outptr2), // %1
                    "=r"(r0), // %2
                    "=r"(r1), // %3
                    "=r"(r2), // %4
                    "=r"(r3), // %5
                    "=r"(r4), // %6
                    "=r"(r5) // %7
                    : "0"(outptr),
                    "1"(outptr2),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "6"(r4),
                    "7"(r5),
                    "w"(_k0123), // %16
                    "w"(_k4567), // %17
                    "w"(_k891011), // %18
                    "w"(_k12131415), // %19
                    "w"(_k16171819), // %20
                    "w"(_k20212223), // %21
                    "w"(_k24242424), // %22
                    "w"(_bias0) // %23
                    : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    // r1
                    "pld [%4, #256] \n"
                    "vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14
                    "vmov q8, %q25 \n" // q8 = _bias0
                    "0: \n"
                    "vmov q9, %q25 \n" // q9 = _bias0
                    "vmla.f32 q8, q14, %e19[1] \n"
                    "vmla.f32 q9, q14, %e18[0] \n"
                    "vext.32 q12, q14, q15, #1 \n" // r11
                    "vmla.f32 q8, q15, %e20[1] \n"
                    "vmla.f32 q9, q15, %e19[0] \n"
                    "vext.32 q13, q14, q15, #2 \n" // r12
                    "vmla.f32 q8, q12, %f19[0] \n"
                    "vmla.f32 q9, q12, %e18[1] \n"
                    "vext.32 q12, q14, q15, #3 \n" // r13
                    "vmla.f32 q8, q13, %f19[1] \n"
                    "vmla.f32 q9, q13, %f18[0] \n"
                    // r2
                    "pld [%5, #256] \n"
                    "vld1.f32 {d20-d23}, [%5] \n" // q10 q11 = r20 r24
                    "vmla.f32 q8, q12, %e20[0] \n"
                    "vmla.f32 q9, q12, %f18[1] \n"
                    "add %5, #16 \n"
                    "vmla.f32 q8, q10, %f20[0] \n"
                    "vmla.f32 q9, q10, %e19[1] \n"
                    "vext.32 q12, q10, q11, #1 \n" // r21
                    "vmla.f32 q8, q11, %f21[0] \n"
                    "vmla.f32 q9, q11, %e20[1] \n"
                    "vext.32 q13, q10, q11, #2 \n" // r22
                    "vmla.f32 q8, q12, %f20[1] \n"
                    "vmla.f32 q9, q12, %f19[0] \n"
                    "vext.32 q12, q10, q11, #3 \n" // r23
                    "vmla.f32 q8, q13, %e21[0] \n"
                    "vmla.f32 q9, q13, %f19[1] \n"
                    // r3
                    "pld [%6, #256] \n"
                    "vld1.f32 {d28-d31}, [%6] \n" // q14 q15 = r30 r34
                    "vmla.f32 q8, q12, %e21[1] \n"
                    "vmla.f32 q9, q12, %e20[0] \n"
                    "add %6, #16 \n"
                    "vmla.f32 q8, q14, %f21[1] \n"
                    "vmla.f32 q9, q14, %f20[0] \n"
                    "vext.32 q12, q14, q15, #1 \n" // r31
                    "vmla.f32 q8, q15, %f22[1] \n"
                    "vmla.f32 q9, q15, %f21[0] \n"
                    "vext.32 q13, q14, q15, #2 \n" // r32
                    "vmla.f32 q8, q12, %e22[0] \n"
                    "vmla.f32 q9, q12, %f20[1] \n"
                    "vext.32 q12, q14, q15, #3 \n" // r33
                    "vmla.f32 q8, q13, %e22[1] \n"
                    "vmla.f32 q9, q13, %e21[0] \n"
                    // r4
                    "pld [%7, #256] \n"
                    "vld1.f32 {d20-d23}, [%7] \n" // q10 q11 = r40 r44
                    "vmla.f32 q8, q12, %f22[0] \n"
                    "vmla.f32 q9, q12, %e21[1] \n"
                    "add %7, #16 \n"
                    "vmla.f32 q8, q10, %e23[0] \n"
                    "vmla.f32 q9, q10, %f21[1] \n"
                    "vext.32 q12, q10, q11, #1 \n" // r41
                    "vmla.f32 q8, q11, %e24[0] \n"
                    "vmla.f32 q9, q11, %f22[1] \n"
                    "vext.32 q13, q10, q11, #2 \n" // r42
                    "vmla.f32 q8, q12, %e23[1] \n"
                    "vmla.f32 q9, q12, %e22[0] \n"
                    "vext.32 q12, q10, q11, #3 \n" // r43
                    "vmla.f32 q8, q13, %f23[0] \n"
                    "vmla.f32 q9, q13, %e22[1] \n"
                    // r0 and r5
                    "pld [%3, #256] \n"
                    "vld1.f32 {d20-d23}, [%3] \n" // q10 q11 = r00 r04
                    "vmla.f32 q8, q12, %f23[1] \n"
                    "vmla.f32 q9, q12, %f22[0] \n"
                    // r5
                    "pld [%8, #256] \n"
                    "vld1.f32 {d28-d31}, [%8] \n" // q14 q15 = r50 r54
                    "vmla.f32 q8, q10, %e18[0] \n"
                    "vmla.f32 q9, q14, %e23[0] \n"
                    "vext.32 q12, q10, q11, #1 \n" // r01
                    "vmla.f32 q8, q11, %e19[0] \n"
                    "vmla.f32 q9, q15, %e24[0] \n"
                    "vext.32 q13, q14, q15, #1 \n" // r51
                    "vmla.f32 q8, q12, %e18[1] \n"
                    "vext.32 q12, q10, q11, #2 \n" // r02
                    "vmla.f32 q9, q13, %e23[1] \n"
                    "vext.32 q13, q14, q15, #2 \n" // r52
                    "vmla.f32 q8, q12, %f18[0] \n"
                    "vext.32 q12, q10, q11, #3 \n" // r03
                    "vmla.f32 q9, q13, %f23[0] \n"
                    "vext.32 q13, q14, q15, #3 \n" // r53
                    "vmla.f32 q8, q12, %f18[1] \n"
                    "add %3, #16 \n"
                    "vmla.f32 q9, q13, %f23[1] \n"
                    "add %4, #16 \n"
                    // r1
                    "pld [%4, #256] \n"
                    "vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14
                    "add %8, #16 \n"
                    "vst1.f32 {d16-d17}, [%1]! \n"
                    "vmov q8, %q25 \n" // q8 = _bias0
                    "subs %0, #1 \n"
                    "vst1.f32 {d18-d19}, [%2]! \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(outptr2), // %2
                    "=r"(r0), // %3
                    "=r"(r1), // %4
                    "=r"(r2), // %5
                    "=r"(r3), // %6
                    "=r"(r4), // %7
                    "=r"(r5) // %8
                    : "0"(nn),
                    "1"(outptr),
                    "2"(outptr2),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "7"(r4),
                    "8"(r5),
                    "w"(_k0123), // %18
                    "w"(_k4567), // %19
                    "w"(_k891011), // %20
                    "w"(_k12131415), // %21
                    "w"(_k16171819), // %22
                    "w"(_k20212223), // %23
                    "w"(_k24242424), // %24
                    "w"(_bias0) // %25
                    : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // intrinsic/scalar tail for leftover output columns (two rows at a time)
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                float sum2 = bias0;
#if __ARM_NEON
                // TODO neon assembly optimize
                float32x4_t _r1 = vld1q_f32(r1);
                float32x4_t _k1 = vld1q_f32(k1);
                float32x4_t _sum = vmulq_f32(_r1, _k1);
                float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
                float32x4_t _r2 = vld1q_f32(r2);
                float32x4_t _k2 = vld1q_f32(k2);
                _sum = vmlaq_f32(_sum, _r2, _k2);
                _sum2 = vmlaq_f32(_sum2, _r2, _k1);
                float32x4_t _r3 = vld1q_f32(r3);
                float32x4_t _k3 = vld1q_f32(k3);
                _sum = vmlaq_f32(_sum, _r3, _k3);
                _sum2 = vmlaq_f32(_sum2, _r3, _k2);
                float32x4_t _r4 = vld1q_f32(r4);
                _sum = vmlaq_f32(_sum, _r4, _k20212223);
                _sum2 = vmlaq_f32(_sum2, _r4, _k3);
                float32x4_t _r0 = vld1q_f32(r0);
                _sum = vmlaq_f32(_sum, _r0, _k0123);
                float32x4_t _r5 = vld1q_f32(r5);
                _sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
                // gather the 5th column taps (k*[4]) and inputs (r*[4]) into lanes
                float32x4_t _k_t4 = {};
                _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
                float32x4_t _r_t4 = {};
                _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                sum += r4[4] * k4[4];
                _r_t4 = vextq_f32(_r_t4, _r_t4, 1);
                _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
                _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
                sum2 += r5[4] * k4[4];
                // horizontal reduction of both accumulators
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
                sum += vget_lane_f32(_ss_ss2, 0);
                sum2 += vget_lane_f32(_ss_ss2, 1);
#else
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r0[3] * k0[3];
                sum += r0[4] * k0[4];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r1[3] * k1[3];
                sum += r1[4] * k1[4];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                sum += r2[3] * k2[3];
                sum += r2[4] * k2[4];
                sum += r3[0] * k3[0];
                sum += r3[1] * k3[1];
                sum += r3[2] * k3[2];
                sum += r3[3] * k3[3];
                sum += r3[4] * k3[4];
                sum += r4[0] * k4[0];
                sum += r4[1] * k4[1];
                sum += r4[2] * k4[2];
                sum += r4[3] * k4[3];
                sum += r4[4] * k4[4];
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r1[3] * k0[3];
                sum2 += r1[4] * k0[4];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r2[3] * k1[3];
                sum2 += r2[4] * k1[4];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];
                sum2 += r3[3] * k2[3];
                sum2 += r3[4] * k2[4];
                sum2 += r4[0] * k3[0];
                sum2 += r4[1] * k3[1];
                sum2 += r4[2] * k3[2];
                sum2 += r4[3] * k3[3];
                sum2 += r4[4] * k3[4];
                sum2 += r5[0] * k4[0];
                sum2 += r5[1] * k4[1];
                sum2 += r5[2] * k4[2];
                sum2 += r5[3] * k4[3];
                sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
                *outptr = sum;
                *outptr2 = sum2;

                r0++;
                r1++;
                r2++;
                r3++;
                r4++;
                r5++;
                outptr++;
                outptr2++;
            }

            // advance by kernel overhang (4) plus one extra row (two rows consumed)
            r0 += 4 + w;
            r1 += 4 + w;
            r2 += 4 + w;
            r3 += 4 + w;
            r4 += 4 + w;
            r5 += 4 + w;

            outptr += outw;
            outptr2 += outw;
        }

        // tail: remaining single output row
        for (; i < outh; i++)
        {
#if __ARM_NEON
#if __aarch64__
            int nn = outw >> 3;
            int remain = outw & 7;
#else
            int nn = outw >> 2;
            int remain = outw & 3;
#endif // __aarch64__
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    // v10 v11
                    // r0
                    "prfm pldl1keep, [%2, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08
                    "mov v8.16b, %21.16b \n" // v8 = _bias0
                    "mov v9.16b, %21.16b \n" // v9 = _bias0
                    "0: \n"
                    "fmul v10.4s, v16.4s, %14.s[0] \n"
                    "ext v19.16b, v16.16b, v17.16b, #4 \n" // r01
                    "fmul v11.4s, v17.4s, %14.s[0] \n"
                    "ext v20.16b, v17.16b, v18.16b, #4 \n" // r05
                    "fmla v8.4s, v17.4s, %15.s[0] \n"
                    "ext v21.16b, v16.16b, v17.16b, #8 \n" // r02
                    "fmla v9.4s, v18.4s, %15.s[0] \n"
                    "ext v22.16b, v17.16b, v18.16b, #8 \n" // r06
                    "fmla v10.4s, v19.4s, %14.s[1] \n"
                    "ext v19.16b, v16.16b, v17.16b, #12 \n" // r03
                    "fmla v11.4s, v20.4s, %14.s[1] \n"
                    "ext v20.16b, v17.16b, v18.16b, #12 \n" // r07
                    "fmla v8.4s, v21.4s, %14.s[2] \n"
                    "fmla v9.4s, v22.4s, %14.s[2] \n"
                    // r1
                    "prfm pldl1keep, [%3, #384] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n" // v12 v13 v14 = r10 r14 r18
                    "fmla v10.4s, v19.4s, %14.s[3] \n"
                    "fmla v11.4s, v20.4s, %14.s[3] \n"
                    "fmla v8.4s, v12.4s, %15.s[1] \n"
                    "ext v19.16b, v12.16b, v13.16b, #4 \n" // r11
                    "fmla v9.4s, v13.4s, %15.s[1] \n"
                    "ext v20.16b, v13.16b, v14.16b, #4 \n" // r15
                    "fmla v10.4s, v13.4s, %16.s[1] \n"
                    "ext v21.16b, v12.16b, v13.16b, #8 \n" // r12
                    "fmla v11.4s, v14.4s, %16.s[1] \n"
                    "ext v22.16b, v13.16b, v14.16b, #8 \n" // r16
                    "fmla v8.4s, v19.4s, %15.s[2] \n"
                    "ext v19.16b, v12.16b, v13.16b, #12 \n" // r13
                    "fmla v9.4s, v20.4s, %15.s[2] \n"
                    "ext v20.16b, v13.16b, v14.16b, #12 \n" // r17
                    "fmla v10.4s, v21.4s, %15.s[3] \n"
                    "fmla v11.4s, v22.4s, %15.s[3] \n"
                    // r2
                    "prfm pldl1keep, [%4, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r20 r24 r28
                    "fmla v8.4s, v19.4s, %16.s[0] \n"
                    "fmla v9.4s, v20.4s, %16.s[0] \n"
                    "fmla v10.4s, v16.4s, %16.s[2] \n"
                    "ext v19.16b, v16.16b, v17.16b, #4 \n" // r21
                    "fmla v11.4s, v17.4s, %16.s[2] \n"
                    "ext v20.16b, v17.16b, v18.16b, #4 \n" // r25
                    "fmla v8.4s, v17.4s, %17.s[2] \n"
                    "ext v21.16b, v16.16b, v17.16b, #8 \n" // r22
                    "fmla v9.4s, v18.4s, %17.s[2] \n"
                    "ext v22.16b, v17.16b, v18.16b, #8 \n" // r26
                    "fmla v10.4s, v19.4s, %16.s[3] \n"
                    "ext v19.16b, v16.16b, v17.16b, #12 \n" // r23
                    "fmla v11.4s, v20.4s, %16.s[3] \n"
                    "ext v20.16b, v17.16b, v18.16b, #12 \n" // r27
                    "fmla v8.4s, v21.4s, %17.s[0] \n"
                    "fmla v9.4s, v22.4s, %17.s[0] \n"
                    // r3
                    "prfm pldl1keep, [%5, #384] \n"
                    "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 = r30 r34 r38
                    "fmla v10.4s, v19.4s, %17.s[1] \n"
                    "fmla v11.4s, v20.4s, %17.s[1] \n"
                    "fmla v8.4s, v12.4s, %17.s[3] \n"
                    "ext v19.16b, v12.16b, v13.16b, #4 \n" // r31
                    "fmla v9.4s, v13.4s, %17.s[3] \n"
                    "ext v20.16b, v13.16b, v14.16b, #4 \n" // r35
                    "fmla v10.4s, v13.4s, %18.s[3] \n"
                    "ext v21.16b, v12.16b, v13.16b, #8 \n" // r32
                    "fmla v11.4s, v14.4s, %18.s[3] \n"
                    "ext v22.16b, v13.16b, v14.16b, #8 \n" // r36
                    "fmla v8.4s, v19.4s, %18.s[0] \n"
                    "ext v19.16b, v12.16b, v13.16b, #12 \n" // r33
                    "fmla v9.4s, v20.4s, %18.s[0] \n"
                    "ext v20.16b, v13.16b, v14.16b, #12 \n" // r37
                    "fmla v10.4s, v21.4s, %18.s[1] \n"
                    "fmla v11.4s, v22.4s, %18.s[1] \n"
                    // r4
                    "prfm pldl1keep, [%6, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r40 r44 r48
                    "fmla v8.4s, v19.4s, %18.s[2] \n"
                    "fmla v9.4s, v20.4s, %18.s[2] \n"
                    "fmla v10.4s, v16.4s, %19.s[0] \n"
                    "ext v19.16b, v16.16b, v17.16b, #4 \n" // r41
                    "fmla v11.4s, v17.4s, %19.s[0] \n"
                    "ext v20.16b, v17.16b, v18.16b, #4 \n" // r45
                    "fmla v8.4s, v17.4s, %20.s[0] \n"
                    "ext v21.16b, v16.16b, v17.16b, #8 \n" // r42
                    "fmla v9.4s, v18.4s, %20.s[0] \n"
                    "ext v22.16b, v17.16b, v18.16b, #8 \n" // r46
                    "fmla v10.4s, v19.4s, %19.s[1] \n"
                    "ext v19.16b, v16.16b, v17.16b, #12 \n" // r43
                    "fmla v11.4s, v20.4s, %19.s[1] \n"
                    "ext v20.16b, v17.16b, v18.16b, #12 \n" // r47
                    "fmla v8.4s, v21.4s, %19.s[2] \n"
                    "add %2, %2, #32 \n"
                    "fmla v9.4s, v22.4s, %19.s[2] \n"
                    "add %3, %3, #32 \n"
                    "fmla v10.4s, v19.4s, %19.s[3] \n"
                    "add %4, %4, #32 \n"
                    "fmla v11.4s, v20.4s, %19.s[3] \n"
                    // r0
                    "prfm pldl1keep, [%2, #384] \n"
                    "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08
                    "add %5, %5, #32 \n"
                    "fadd v10.4s, v8.4s, v10.4s \n"
                    "add %6, %6, #32 \n"
                    "fadd v11.4s, v9.4s, v11.4s \n"
                    "mov v8.16b, %21.16b \n" // v8 = _bias0
                    "mov v9.16b, %21.16b \n" // v9 = _bias0
                    "subs %w0, %w0, #1 \n"
                    "st1 {v10.4s, v11.4s}, [%1], #32 \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0), // %2
                    "=r"(r1), // %3
                    "=r"(r2), // %4
                    "=r"(r3), // %5
                    "=r"(r4) // %6
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "6"(r4),
                    "w"(_k0123), // %14
                    "w"(_k4567), // %15
                    "w"(_k891011), // %16
                    "w"(_k12131415), // %17
                    "w"(_k16171819), // %18
                    "w"(_k20212223), // %19
                    "w"(_k24242424), // %20
                    "w"(_bias0) // %21
                    : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22");
            }

            // 4-wide tail for the single-row path
            if (remain >= 4)
            {
                remain -= 4;

                asm volatile(
                    // r0
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v16.4s, v17.4s}, [%1] \n" // v16 v17 = r00 r04
                    "mov v8.16b, %19.16b \n" // v8 = _bias0
                    "add %1, %1, #16 \n"
                    "fmul v9.4s, v16.4s, %12.s[0] \n"
                    "ext v18.16b, v16.16b, v17.16b, #4 \n" // r01
                    "fmla v8.4s, v17.4s, %13.s[0] \n"
                    "ext v19.16b, v16.16b, v17.16b, #8 \n" // r02
                    "fmla v9.4s, v18.4s, %12.s[1] \n"
                    "ext v20.16b, v16.16b, v17.16b, #12 \n" // r03
                    "fmla v8.4s, v19.4s, %12.s[2] \n"
                    // r1
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%2] \n" // v10 v11 = r10 r14
                    "fmla v9.4s, v20.4s, %12.s[3] \n"
                    "add %2, %2, #16 \n"
                    "fmla v8.4s, v10.4s, %13.s[1] \n"
                    "ext v12.16b, v10.16b, v11.16b, #4 \n" // r11
                    "fmla v9.4s, v11.4s, %14.s[1] \n"
                    "ext v13.16b, v10.16b, v11.16b, #8 \n" // r12
                    "fmla v8.4s, v12.4s, %13.s[2] \n"
                    "ext v14.16b, v10.16b, v11.16b, #12 \n" // r13
                    "fmla v9.4s, v13.4s, %13.s[3] \n"
                    // r2
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v16.4s, v17.4s}, [%3] \n" // v16 v17 = r20 r24
                    "fmla v8.4s, v14.4s, %14.s[0] \n"
                    "add %3, %3, #16 \n"
                    "fmla v9.4s, v16.4s, %14.s[2] \n"
                    "ext v18.16b, v16.16b, v17.16b, #4 \n" // r21
                    "fmla v8.4s, v17.4s, %15.s[2] \n"
                    "ext v19.16b, v16.16b, v17.16b, #8 \n" // r22
                    "fmla v9.4s, v18.4s, %14.s[3] \n"
                    "ext v20.16b, v16.16b, v17.16b, #12 \n" // r23
                    "fmla v8.4s, v19.4s, %15.s[0] \n"
                    // r3
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v10.4s, v11.4s}, [%4] \n" // v10 v11 = r30 r34
                    "fmla v9.4s, v20.4s, %15.s[1] \n"
                    "add %4, %4, #16 \n"
                    "fmla v8.4s, v10.4s, %15.s[3] \n"
                    "ext v12.16b, v10.16b, v11.16b, #4 \n" // r31
                    "fmla v9.4s, v11.4s, %16.s[3] \n"
                    "ext v13.16b, v10.16b, v11.16b, #8 \n" // r32
                    "fmla v8.4s, v12.4s, %16.s[0] \n"
                    "ext v14.16b, v10.16b, v11.16b, #12 \n" // r33
                    "fmla v9.4s, v13.4s, %16.s[1] \n"
                    // r4
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld1 {v16.4s, v17.4s}, [%5] \n" // v16 v17 = r40 r44
                    "fmla v8.4s, v14.4s, %16.s[2] \n"
                    "add %5, %5, #16 \n"
                    "fmla v9.4s, v16.4s, %17.s[0] \n"
                    "ext v18.16b, v16.16b, v17.16b, #4 \n" // r41
                    "fmla v8.4s, v17.4s, %18.s[0] \n"
                    "ext v19.16b, v16.16b, v17.16b, #8 \n" // r42
                    "fmla v9.4s, v18.4s, %17.s[1] \n"
                    "ext v20.16b, v16.16b, v17.16b, #12 \n" // r43
                    "fmla v8.4s, v19.4s, %17.s[2] \n"
                    "fmla v9.4s, v20.4s, %17.s[3] \n"
                    "fadd v8.4s, v8.4s, v9.4s \n"
                    "st1 {v8.4s}, [%0], #16 \n"
                    : "=r"(outptr), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2), // %3
                    "=r"(r3), // %4
                    "=r"(r4) // %5
                    : "0"(outptr),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "4"(r3),
                    "5"(r4),
                    "w"(_k0123), // %12
                    "w"(_k4567), // %13
                    "w"(_k891011), // %14
                    "w"(_k12131415), // %15
                    "w"(_k16171819), // %16
                    "w"(_k20212223), // %17
                    "w"(_k24242424), // %18
                    "w"(_bias0) // %19
                    : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    // r0
                    "pld [%2, #256] \n"
                    "vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04
                    "vmov q8, %q21 \n" // q8 = _bias0
                    "0: \n"
                    "vmul.f32 q9, q10, %e14[0] \n"
                    "vext.32 q12, q10, q11, #1 \n" // r01
                    "vmla.f32 q8, q11, %e15[0] \n"
                    "vext.32 q13, q10, q11, #2 \n" // r02
                    "vmla.f32 q9, q12, %e14[1] \n"
                    "vext.32 q12, q10, q11, #3 \n" // r03
                    "vmla.f32 q8, q13, %f14[0] \n"
                    // r1
                    "pld [%3, #256] \n"
                    "vld1.f32 {d28-d31}, [%3] \n" // q14 q15 = r10 r14
                    "vmla.f32 q9, q12, %f14[1] \n"
                    "add %3, #16 \n"
                    "vmla.f32 q8, q14, %e15[1] \n"
                    "vext.32 q12, q14, q15, #1 \n" // r11
                    "vmla.f32 q9, q15, %e16[1] \n"
                    "vext.32 q13, q14, q15, #2 \n" // r12
                    "vmla.f32 q8, q12, %f15[0] \n"
                    "vext.32 q12, q14, q15, #3 \n" // r13
                    "vmla.f32 q9, q13, %f15[1] \n"
                    // r2
                    "pld [%4, #256] \n"
                    "vld1.f32 {d20-d23}, [%4] \n" // q10 q11 = r20 r24
                    "vmla.f32 q8, q12, %e16[0] \n"
                    "add %4, #16 \n"
                    "vmla.f32 q9, q10, %f16[0] \n"
                    "vext.32 q12, q10, q11, #1 \n" // r21
                    "vmla.f32 q8, q11, %f17[0] \n"
                    "vext.32 q13, q10, q11, #2 \n" // r22
                    "vmla.f32 q9, q12, %f16[1] \n"
                    "vext.32 q12, q10, q11, #3 \n" // r23
                    "vmla.f32 q8, q13, %e17[0] \n"
                    // r3
                    "pld [%5, #256] \n"
                    "vld1.f32 {d28-d31}, [%5] \n" // q14 q15 = r30 r34
                    "vmla.f32 q9, q12, %e17[1] \n"
                    "add %5, #16 \n"
                    "vmla.f32 q8, q14, %f17[1] \n"
                    "vext.32 q12, q14, q15, #1 \n" // r31
                    "vmla.f32 q9, q15, %f18[1] \n"
                    "vext.32 q13, q14, q15, #2 \n" // r32
                    "vmla.f32 q8, q12, %e18[0] \n"
                    "vext.32 q12, q14, q15, #3 \n" // r33
                    "vmla.f32 q9, q13, %e18[1] \n"
                    // r4
                    "pld [%6, #256] \n"
                    "vld1.f32 {d20-d23}, [%6] \n" // q10 q11 = r40 r44
                    "vmla.f32 q8, q12, %f18[0] \n"
                    "add %6, #16 \n"
                    "vmla.f32 q9, q10, %e19[0] \n"
                    "vext.32 q12, q10, q11, #1 \n" // r41
                    "vmla.f32 q8, q11, %e20[0] \n"
                    "vext.32 q13, q10, q11, #2 \n" // r42
                    "vmla.f32 q9, q12, %e19[1] \n"
                    "vext.32 q12, q10, q11, #3 \n" // r43
                    "vmla.f32 q8, q13, %f19[0] \n"
                    "add %2, #16 \n"
                    "vmla.f32 q9, q12, %f19[1] \n"
                    // r0
                    "pld [%2, #256] \n"
                    "vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04
                    "vadd.f32 q9, q9, q8 \n"
                    "vmov q8, %q21 \n" // q8 = _bias0
                    "subs %0, #1 \n"
                    "vst1.f32 {d18-d19}, [%1]! \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0), // %2
                    "=r"(r1), // %3
                    "=r"(r2), // %4
                    "=r"(r3), // %5
                    "=r"(r4) // %6
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "6"(r4),
                    "w"(_k0123), // %14
                    "w"(_k4567), // %15
                    "w"(_k891011), // %16
                    "w"(_k12131415), // %17
                    "w"(_k16171819), // %18
                    "w"(_k20212223), // %19
                    "w"(_k24242424), // %20
                    "w"(_bias0) // %21
                    : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // intrinsic/asm/scalar tail for leftover output columns (single row)
            for (; remain > 0; remain--)
            {
#if __ARM_NEON
#if __aarch64__
                // TODO neon assembly optimize
                float sum = bias0;
                float32x4_t _r0 = vld1q_f32(r0);
                float32x4_t _sum = vmulq_f32(_r0, _k0123);
                float32x4_t _r1 = vld1q_f32(r1);
                _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
                float32x4_t _r2 = vld1q_f32(r2);
                _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
                float32x4_t _r3 = vld1q_f32(r3);
                _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
                float32x4_t _r4 = vld1q_f32(r4);
                _sum = vmlaq_f32(_sum, _r4, _k20212223);
                // 5th-column taps and inputs gathered into lanes
                float32x4_t _k_t4 = {};
                _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
                float32x4_t _r_t4 = {};
                _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                _sum = vmlaq_f32(_sum, _r_t4, _k_t4);
                sum += r4[4] * k4[4];
                float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                _ss = vpadd_f32(_ss, _ss);
                sum += vget_lane_f32(_ss, 0);
                *outptr = sum;

                r0++;
                r1++;
                r2++;
                r3++;
                r4++;
                outptr++;
#else
                // TODO neon assembly optimize
                asm volatile(
                    "veor q14, q14 \n"
                    "vext.32 q14, %q19, q14, #3 \n" // q14 = bias0 0 0 0
                    "vld1.f32 {d16-d17}, [%1] \n" // q8 = r00 r01 r02 r03
                    "vld1.f32 {d18-d19}, [%2] \n" // q9 = r10 r11 r12 r13(X)
                    "add r4, %1, #16 \n"
                    "vld1.f32 {d19[1]}, [r4] \n"
                    "vext.32 q9, q9, q9, #3 \n" // q9 = r04 r10 r11 r12
                    "vmla.f32 q14, q8, %q12 \n"
                    "add r4, %2, #12 \n"
                    "vld1.f32 {d20}, [r4] \n" // d20 = r13 r14
                    "vld1.f32 {d21}, [%3] \n" // d21 = r20 r21
                    "vmla.f32 q14, q9, %q13 \n"
                    "add r4, %3, #8 \n"
                    "vld1.f32 {d22-d23}, [r4] \n" // q11 = r22 r23 r24 X
                    "vld1.f32 {d23[1]}, [%4] \n" // q11 = r22 r23 r24 r30
                    "vmla.f32 q14, q10, %q14 \n"
                    "add r4, %4, #4 \n"
                    "vld1.f32 {d24-d25}, [r4] \n" // q12 = r31 r32 r33 r34
                    "vmla.f32 q14, q11, %q15 \n"
                    "vld1.f32 {d26-d27}, [%5] \n" // q13 = r40 r41 r42 r43
                    "vmla.f32 q14, q12, %q16 \n"
                    "veor d30, d30 \n"
                    "add r4, %5, #16 \n"
                    "vld1.f32 {d30[0]}, [r4] \n" // d30 = r44 0
                    "vmla.f32 q14, q13, %q17 \n"
                    "vmla.f32 d28, d30, %e18 \n"
                    "add %1, #4 \n"
                    // h-sum
                    "vadd.f32 d28, d28, d29 \n"
                    "add %2, #4 \n"
                    "add %3, #4 \n"
                    "vpadd.f32 d28, d28, d28 \n"
                    "add %4, #4 \n"
                    "add %5, #4 \n"
                    "vst1.f32 {d28[0]}, [%0]! \n"
                    : "=r"(outptr), // %0
                    "=r"(r0), // %1
                    "=r"(r1), // %2
                    "=r"(r2), // %3
                    "=r"(r3), // %4
                    "=r"(r4) // %5
                    : "0"(outptr),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "4"(r3),
                    "5"(r4),
                    "w"(_k0123), // %12
                    "w"(_k4567), // %13
                    "w"(_k891011), // %14
                    "w"(_k12131415), // %15
                    "w"(_k16171819), // %16
                    "w"(_k20212223), // %17
                    "w"(_k24242424), // %18
                    "w"(_bias0) // %19
                    : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r0[3] * k0[3];
                sum += r0[4] * k0[4];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r1[3] * k1[3];
                sum += r1[4] * k1[4];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                sum += r2[3] * k2[3];
                sum += r2[4] * k2[4];
                sum += r3[0] * k3[0];
                sum += r3[1] * k3[1];
                sum += r3[2] * k3[2];
                sum += r3[3] * k3[3];
                sum += r3[4] * k3[4];
                sum += r4[0] * k4[0];
                sum += r4[1] * k4[1];
                sum += r4[2] * k4[2];
                sum += r4[3] * k4[3];
                sum += r4[4] * k4[4];
                *outptr = sum;

                r0++;
                r1++;
                r2++;
                r3++;
                r4++;
                outptr++;
#endif
            }

            // advance by kernel overhang only (one row consumed)
            r0 += 4;
            r1 += 4;
            r2 += 4;
            r3 += 4;
            r4 += 4;
        }
    }
}

static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const
Mat& _bias, const Option& opt) { int w = bottom_blob.w; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; //int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? bias[g] : 0.f; const float* kernel0 = kernel + g * 25; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0 + 4); float32x4_t _k891011 = vld1q_f32(kernel0 + 8); float32x4_t _k12131415 = vld1q_f32(kernel0 + 12); float32x4_t _k16171819 = vld1q_f32(kernel0 + 16); float32x4_t _k20212223 = vld1q_f32(kernel0 + 20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); float32x4_t _bias0 = vdupq_n_f32(bias0); #endif // __ARM_NEON int i = 0; // NOTE unroll outh 2 results somewhat speed drop :| (about -4%) // so we do not implement it here for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // r0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01 "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09 "0: \n" "fmul v10.4s, v16.4s, %14.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 
{v20.4s, v21.4s}, [%2] \n" // v20 v21 = r016 r017 "fmul v11.4s, v18.4s, %14.s[0] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r02 "fmla v8.4s, v17.4s, %14.s[1] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r010 "fmla v9.4s, v19.4s, %14.s[1] \n" "ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r03 "fmla v10.4s, v22.4s, %14.s[2] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r011 "fmla v11.4s, v25.4s, %14.s[2] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r04 "fmla v8.4s, v23.4s, %14.s[3] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r012 "fmla v9.4s, v26.4s, %14.s[3] \n" // r1 "prfm pldl1keep, [%3, #256] \n" "ld2 {v12.4s, v13.4s}, [%3], #32 \n" // v12 v13 = r10 r11 "fmla v10.4s, v24.4s, %15.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v14.4s, v15.4s}, [%3], #32 \n" // v14 v15 = r18 r19 "fmla v11.4s, v27.4s, %15.s[0] \n" "fmla v8.4s, v12.4s, %15.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v20.4s, v21.4s}, [%3] \n" // v20 v21 = r116 r117 "fmla v9.4s, v14.4s, %15.s[1] \n" "ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r12 "fmla v10.4s, v13.4s, %15.s[2] \n" "ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r110 "fmla v11.4s, v15.4s, %15.s[2] \n" "ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r13 "fmla v8.4s, v22.4s, %15.s[3] \n" "ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r111 "fmla v9.4s, v25.4s, %15.s[3] \n" "ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r14 "fmla v10.4s, v23.4s, %16.s[0] \n" "ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r112 "fmla v11.4s, v26.4s, %16.s[0] \n" // r2 "prfm pldl1keep, [%4, #256] \n" "ld2 {v16.4s, v17.4s}, [%4], #32 \n" // v16 v17 = r20 r21 "fmla v8.4s, v24.4s, %16.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v18.4s, v19.4s}, [%4], #32 \n" // v18 v19 = r28 r29 "fmla v9.4s, v27.4s, %16.s[1] \n" "fmla v10.4s, v16.4s, %16.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v20.4s, v21.4s}, [%4] \n" // v20 v21 = r216 r217 "fmla v11.4s, v18.4s, %16.s[2] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = 
r22 "fmla v8.4s, v17.4s, %16.s[3] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r210 "fmla v9.4s, v19.4s, %16.s[3] \n" "ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r23 "fmla v10.4s, v22.4s, %17.s[0] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r211 "fmla v11.4s, v25.4s, %17.s[0] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r24 "fmla v8.4s, v23.4s, %17.s[1] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r212 "fmla v9.4s, v26.4s, %17.s[1] \n" // r3 "prfm pldl1keep, [%5, #256] \n" "ld2 {v12.4s, v13.4s}, [%5], #32 \n" // v12 v13 = r30 r31 "fmla v10.4s, v24.4s, %17.s[2] \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v14.4s, v15.4s}, [%5], #32 \n" // v14 v15 = r38 r39 "fmla v11.4s, v27.4s, %17.s[2] \n" "fmla v8.4s, v12.4s, %17.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v20.4s, v21.4s}, [%5] \n" // v20 v21 = r316 r317 "fmla v9.4s, v14.4s, %17.s[3] \n" "ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r32 "fmla v10.4s, v13.4s, %18.s[0] \n" "ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r310 "fmla v11.4s, v15.4s, %18.s[0] \n" "ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r33 "fmla v8.4s, v22.4s, %18.s[1] \n" "ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r311 "fmla v9.4s, v25.4s, %18.s[1] \n" "ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r34 "fmla v10.4s, v23.4s, %18.s[2] \n" "ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r312 "fmla v11.4s, v26.4s, %18.s[2] \n" // r4 "prfm pldl1keep, [%6, #256] \n" "ld2 {v16.4s, v17.4s}, [%6], #32 \n" // v16 v17 = r40 r41 "fmla v8.4s, v24.4s, %18.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld2 {v18.4s, v19.4s}, [%6], #32 \n" // v18 v19 = r48 r49 "fmla v9.4s, v27.4s, %18.s[3] \n" "fmla v10.4s, v16.4s, %19.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld2 {v20.4s, v21.4s}, [%6] \n" // v20 v21 = r416 r417 "fmla v11.4s, v18.4s, %19.s[0] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r42 "fmla v8.4s, v17.4s, %19.s[1] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r410 "fmla v9.4s, v19.4s, %19.s[1] \n" "ext 
v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r43 "fmla v10.4s, v22.4s, %19.s[2] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r411 "fmla v11.4s, v25.4s, %19.s[2] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r44 "fmla v8.4s, v23.4s, %19.s[3] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r412 "fmla v9.4s, v26.4s, %19.s[3] \n" "fmla v10.4s, v24.4s, %20.s[0] \n" // r0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01 "fmla v11.4s, v27.4s, %20.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09 "fadd v10.4s, v8.4s, v10.4s \n" "fadd v11.4s, v9.4s, v11.4s \n" "subs %w0, %w0, #1 \n" "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "st1 {v10.4s, v11.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #else if (nn > 0) { asm volatile( // r0 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2]! \n" // q10 q11 = r00 r01 "vmov q8, %q21 \n" "pld [%2, #128] \n" "vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x "0: \n" "vmul.f32 q9, q10, %e14[0] \n" "vmov d26, d25 \n" // q13 = r09 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r02 "vmla.f32 q8, q11, %e14[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r03 "vmla.f32 q9, q14, %f14[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r04 "vmla.f32 q8, q15, %f14[1] \n" // r1 "pld [%3, #256] \n" "vld2.f32 {d20-d23}, [%3]! 
\n" // q10 q11 = r10 r11 "vmla.f32 q9, q14, %e15[0] \n" "pld [%3, #128] \n" "vld2.f32 {d24-d25}, [%3] \n" // q12 = r18 x x "vmla.f32 q8, q10, %e15[1] \n" "vmov d26, d25 \n" // q13 = r19 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r12 "vmla.f32 q9, q11, %f15[0] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r13 "vmla.f32 q8, q14, %f15[1] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r14 "vmla.f32 q9, q15, %e16[0] \n" // r2 "pld [%4, #256] \n" "vld2.f32 {d20-d23}, [%4]! \n" // q10 q11 = r20 r21 "vmla.f32 q8, q14, %e16[1] \n" "pld [%4, #128] \n" "vld2.f32 {d24-d25}, [%4] \n" // q12 = r28 x x "vmla.f32 q9, q10, %f16[0] \n" "vmov d26, d25 \n" // q13 = r29 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r22 "vmla.f32 q8, q11, %f16[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r23 "vmla.f32 q9, q14, %e17[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r24 "vmla.f32 q8, q15, %e17[1] \n" // r3 "pld [%5, #256] \n" "vld2.f32 {d20-d23}, [%5]! \n" // q10 q11 = r30 r31 "vmla.f32 q9, q14, %f17[0] \n" "pld [%5, #128] \n" "vld2.f32 {d24-d25}, [%5] \n" // q12 = r38 x x "vmla.f32 q8, q10, %f17[1] \n" "vmov d26, d25 \n" // q13 = r39 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r32 "vmla.f32 q9, q11, %e18[0] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r33 "vmla.f32 q8, q14, %e18[1] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r34 "vmla.f32 q9, q15, %f18[0] \n" // r4 "pld [%6, #256] \n" "vld2.f32 {d20-d23}, [%6]! \n" // q10 q11 = r40 r41 "vmla.f32 q8, q14, %f18[1] \n" "pld [%6, #128] \n" "vld2.f32 {d24-d25}, [%6] \n" // q12 = r48 x x "vmla.f32 q9, q10, %e19[0] \n" "vmov d26, d25 \n" // q13 = r49 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r42 "vmla.f32 q8, q11, %e19[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r43 "vmla.f32 q9, q14, %f19[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r44 "vmla.f32 q8, q15, %f19[1] \n" // r0 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2]! 
\n" // q10 q11 = r00 r01 "vmla.f32 q9, q14, %e20[0] \n" "pld [%2, #128] \n" "vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x "vadd.f32 q9, q8, q9 \n" "vmov q8, %q21 \n" "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%1]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = bias0; #if __ARM_NEON // TODO neon assembly optimize float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); sum += r0[4] * k0[4]; sum += r1[4] * k1[4]; sum += r2[4] * k2[4]; sum += r3[4] * k3[4]; sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; 
#endif *outptr = sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
resource_manager.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & Newcastle University for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_RESOURCE_MANAGER_H_ #define CORE_RESOURCE_MANAGER_H_ #include <omp.h> #include <sched.h> #include <algorithm> #include <cmath> #include <limits> #include <memory> #include <ostream> #include <set> #include <string> #include <unordered_map> #include <utility> #include <vector> #if defined(USE_OPENCL) && !defined(__ROOTCLING__) #ifdef __APPLE__ #define CL_HPP_ENABLE_EXCEPTIONS #define CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY #define CL_HPP_MINIMUM_OPENCL_VERSION 120 #define CL_HPP_TARGET_OPENCL_VERSION 120 #include "cl2.hpp" #else #define __CL_ENABLE_EXCEPTIONS #include <CL/cl2.hpp> #endif #endif #include "core/agent/agent.h" #include "core/agent/agent_handle.h" #include "core/agent/agent_uid.h" #include "core/agent/agent_uid_generator.h" #include "core/container/agent_uid_map.h" #include "core/diffusion/diffusion_grid.h" #include "core/operation/operation.h" #include "core/simulation.h" #include "core/type_index.h" #include "core/util/numa.h" #include "core/util/root.h" #include "core/util/thread_info.h" #include "core/util/type.h" namespace bdm { /// ResourceManager stores agents and diffusion grids and provides /// methods to add, remove, and access them. Agents are uniquely identified /// by their AgentUid, and AgentHandle. An AgentHandle might change during the /// simulation. 
class ResourceManager { public: explicit ResourceManager(TRootIOCtor* r) {} ResourceManager(); virtual ~ResourceManager() { for (auto& el : diffusion_grids_) { delete el.second; } for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } } if (type_index_) { delete type_index_; } } ResourceManager& operator=(ResourceManager&& other) { if (agents_.size() != other.agents_.size()) { Log::Fatal( "Restored ResourceManager has different number of NUMA nodes."); } for (auto& el : diffusion_grids_) { delete el.second; } for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } } agents_ = std::move(other.agents_); diffusion_grids_ = std::move(other.diffusion_grids_); RebuildAgentUidMap(); // restore type_index_ if (type_index_) { for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { type_index_->Add(agent); } } } return *this; } void RebuildAgentUidMap() { // rebuild uid_ah_map_ uid_ah_map_.clear(); auto* agent_uid_generator = Simulation::GetActive()->GetAgentUidGenerator(); uid_ah_map_.resize(agent_uid_generator->GetHighestIndex() + 1); for (unsigned n = 0; n < agents_.size(); ++n) { for (unsigned i = 0; i < agents_[n].size(); ++i) { auto* agent = agents_[n][i]; this->uid_ah_map_.Insert(agent->GetUid(), AgentHandle(n, i)); } } } Agent* GetAgent(const AgentUid& uid) { if (!uid_ah_map_.Contains(uid)) { return nullptr; } auto& ah = uid_ah_map_[uid]; return agents_[ah.GetNumaNode()][ah.GetElementIdx()]; } Agent* GetAgent(AgentHandle ah) { return agents_[ah.GetNumaNode()][ah.GetElementIdx()]; } AgentHandle GetAgentHandle(const AgentUid& uid) { return uid_ah_map_[uid]; } void AddDiffusionGrid(DiffusionGrid* dgrid) { uint64_t substance_id = dgrid->GetSubstanceId(); auto search = diffusion_grids_.find(substance_id); if (search != diffusion_grids_.end()) { Log::Fatal("ResourceManager::AddDiffusionGrid", "You tried to add a diffusion grid with an already existing " "substance id. 
Please choose a different substance id."); } else { diffusion_grids_[substance_id] = dgrid; } } void RemoveDiffusionGrid(size_t substance_id) { auto search = diffusion_grids_.find(substance_id); if (search != diffusion_grids_.end()) { delete search->second; diffusion_grids_.erase(search); } else { Log::Error("ResourceManager::RemoveDiffusionGrid", "You tried to remove a diffusion grid that does not exist."); } } /// Return the diffusion grid which holds the substance of specified id DiffusionGrid* GetDiffusionGrid(size_t substance_id) const { if(substance_id >= diffusion_grids_.size()) { Log::Error("DiffusionGrid::GetDiffusionGrid", "You tried to request diffusion grid '", substance_id, "', but it does not exist! Make sure that it's the correct id " "correctly and that the diffusion grid is registered."); return nullptr; } return diffusion_grids_.at(substance_id); } /// Return the diffusion grid which holds the substance of specified name /// Caution: using this function in a tight loop will result in a slow /// simulation. Use `GetDiffusionGrid(size_t)` in those cases. DiffusionGrid* GetDiffusionGrid(std::string substance_name) const { for (auto& el : diffusion_grids_) { auto& dg = el.second; if (dg->GetSubstanceName() == substance_name) { return dg; } } Log::Error("DiffusionGrid::GetDiffusionGrid", "You tried to request a diffusion grid named '", substance_name, "', but it does not exist! Make sure that it's spelled " "correctly and that the diffusion grid is registered."); return nullptr; } /// Execute the given functor for all diffusion grids /// rm->ForEachDiffusionGrid([](DiffusionGrid* dgrid) { /// ... 
/// }); template <typename TFunctor> void ForEachDiffusionGrid(TFunctor&& f) const { for (auto& el : diffusion_grids_) { f(el.second); } } /// Returns the total number of agents if numa_node == -1 /// Otherwise the number of agents in the specific numa node size_t GetNumAgents(int numa_node = -1) const { if (numa_node == -1) { size_t num_agents = 0; for (auto& numa_agents : agents_) { num_agents += numa_agents.size(); } return num_agents; } else { return agents_[numa_node].size(); } } /// Apply a function on all elements in every container /// @param function that will be called with each container as a parameter /// /// rm->ForEachAgent([](Agent* element) { /// std::cout << *element << std::endl; /// }); virtual void ForEachAgent(const std::function<void(Agent*)>& function) { for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { function(agent); } } } virtual void ForEachAgent( const std::function<void(Agent*, AgentHandle)>& function) { for (uint64_t n = 0; n < agents_.size(); ++n) { auto& numa_agents = agents_[n]; for (uint64_t i = 0; i < numa_agents.size(); ++i) { function(numa_agents[i], AgentHandle(n, i)); } } } /// Apply a function on all elements.\n /// Function invocations are parallelized.\n /// Uses static scheduling. /// \see ForEachAgent virtual void ForEachAgentParallel(Functor<void, Agent*>& function); /// Apply an operation on all elements.\n /// Function invocations are parallelized.\n /// Uses static scheduling. /// \see ForEachAgent virtual void ForEachAgentParallel(Operation& op); virtual void ForEachAgentParallel( Functor<void, Agent*, AgentHandle>& function); /// Apply a function on all elements.\n /// Function invocations are parallelized.\n /// Uses dynamic scheduling and work stealing. Batch size controlled by /// `chunk`. 
/// \param chunk number of agents that are assigned to a thread (batch /// size) /// \see ForEachAgent virtual void ForEachAgentParallel( uint64_t chunk, Functor<void, Agent*, AgentHandle>& function); /// Reserves enough memory to hold `capacity` number of agents for /// each numa domain. void Reserve(size_t capacity) { for (auto& numa_agents : agents_) { numa_agents.reserve(capacity); } if (type_index_) { type_index_->Reserve(capacity); } } /// Resize `agents_[numa_node]` such that it holds `current + additional` /// elements after this call. /// Returns the size after uint64_t GrowAgentContainer(size_t additional, size_t numa_node) { if (additional == 0) { return agents_[numa_node].size(); } auto current = agents_[numa_node].size(); if (current + additional < agents_[numa_node].size()) { agents_[numa_node].reserve((current + additional) * 1.5); } agents_[numa_node].resize(current + additional); return current; } /// Returns true if an agent with the given uid is stored in this /// ResourceManager. bool ContainsAgent(const AgentUid& uid) const { return uid_ah_map_.Contains(uid); } /// Remove all agents /// NB: This method is not thread-safe! This function invalidates /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. void ClearAgents() { uid_ah_map_.clear(); for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } numa_agents.clear(); } if (type_index_) { type_index_->Clear(); } } /// Reorder agents such that, agents are distributed to NUMA /// nodes. Nearby agents will be moved to the same NUMA node. virtual void LoadBalance(); void DebugNuma() const; /// NB: This method is not thread-safe! This function might invalidate /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. 
void AddAgent(Agent* agent, // NOLINT typename AgentHandle::NumaNode_t numa_node = 0) { auto uid = agent->GetUid(); if (uid.GetIndex() >= uid_ah_map_.size()) { uid_ah_map_.resize(uid.GetIndex() + 1); } agents_[numa_node].push_back(agent); uid_ah_map_.Insert(uid, AgentHandle(numa_node, agents_[numa_node].size() - 1)); if (type_index_) { type_index_->Add(agent); } } void ResizeAgentUidMap() { auto* agent_uid_generator = Simulation::GetActive()->GetAgentUidGenerator(); auto highest_idx = agent_uid_generator->GetHighestIndex(); auto new_size = highest_idx * 1.5 + 1; if (highest_idx >= uid_ah_map_.size()) { uid_ah_map_.resize(new_size); } if (type_index_) { type_index_->Reserve(new_size); } } void EndOfIteration() { // Check if SoUiD defragmentation should be turned on or off double utilization = static_cast<double>(GetNumAgents()) / static_cast<double>(uid_ah_map_.size()); auto* sim = Simulation::GetActive(); auto* param = sim->GetParam(); if (utilization < param->agent_uid_defragmentation_low_watermark) { sim->GetAgentUidGenerator()->EnableDefragmentation(&uid_ah_map_); } else if (utilization > param->agent_uid_defragmentation_high_watermark) { sim->GetAgentUidGenerator()->DisableDefragmentation(); } } /// Adds `new_agents` to `agents_[numa_node]`. `offset` specifies /// the index at which the first element is inserted. Agents are inserted /// consecutively. This methos is thread safe only if insertion intervals do /// not overlap! virtual void AddAgents(typename AgentHandle::NumaNode_t numa_node, uint64_t offset, const std::vector<Agent*>& new_agents) { uint64_t i = 0; for (auto* agent : new_agents) { auto uid = agent->GetUid(); uid_ah_map_.Insert(uid, AgentHandle(numa_node, offset + i)); agents_[numa_node][offset + i] = agent; i++; } if (type_index_) { #pragma omp critical for (auto* agent : new_agents) { type_index_->Add(agent); } } } /// Removes the agent with the given uid.\n /// NB: This method is not thread-safe! 
This function invalidates /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. void RemoveAgent(const AgentUid& uid) { // remove from map if (uid_ah_map_.Contains(uid)) { auto ah = uid_ah_map_[uid]; uid_ah_map_.Remove(uid); // remove from vector auto& numa_agents = agents_[ah.GetNumaNode()]; Agent* agent = nullptr; if (ah.GetElementIdx() == numa_agents.size() - 1) { agent = numa_agents.back(); numa_agents.pop_back(); } else { // swap agent = numa_agents[ah.GetElementIdx()]; auto* reordered = numa_agents.back(); numa_agents[ah.GetElementIdx()] = reordered; numa_agents.pop_back(); uid_ah_map_.Insert(reordered->GetUid(), ah); } if (type_index_) { type_index_->Remove(agent); } delete agent; } } const TypeIndex* GetTypeIndex() const { return type_index_; } protected: /// Maps an AgentUid to its storage location in `agents_` \n AgentUidMap<AgentHandle> uid_ah_map_ = AgentUidMap<AgentHandle>(100u); //! /// Pointer container for all agents std::vector<std::vector<Agent*>> agents_; /// Maps a diffusion grid ID to the pointer to the diffusion grid std::unordered_map<uint64_t, DiffusionGrid*> diffusion_grids_; ThreadInfo* thread_info_ = ThreadInfo::GetInstance(); //! TypeIndex* type_index_ = nullptr; friend class SimulationBackup; friend std::ostream& operator<<(std::ostream& os, const ResourceManager& rm); BDM_CLASS_DEF_NV(ResourceManager, 1); }; inline std::ostream& operator<<(std::ostream& os, const ResourceManager& rm) { os << "\033[1mAgents per numa node\033[0m" << std::endl; uint64_t cnt = 0; for (auto& numa_agents : rm.agents_) { os << "numa node " << cnt++ << " -> size: " << numa_agents.size() << std::endl; } return os; } } // namespace bdm #endif // CORE_RESOURCE_MANAGER_H_
kdtree.c
/** @file kdtree.c ** @brief KD-tree - Definition ** @author Andrea Vedaldi, David Novotny **/ /* Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page kdtree KD-trees and forests @author Andrea Vedaldi @author David Novotny <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @ref kdtree.h implements a KD-tree object, a data structure that can efficiently index moderately dimensional vector spaces. Both best-bin-first @cite{beis97shape} and randomized KD-tree forests are implemented @cite{silpa-anan08optimised},@cite{muja09fast}. Applications include fast matching of feature descriptors. - @ref kdtree-overview - @ref kdtree-tech <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kdtree-overview Overview <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> To create a ::VlKDForest object use ::vl_kdforest_new specifying the dimensionality of the data and the number of trees in the forest. With one tree only, the algorithm is analogous to @cite{beis97shape} (best-bin KDTree). Multiple trees correspond to the randomized KDTree forest as in @cite{silpa-anan08optimised},@cite{muja09fast}. To let the KD-tree index some data use ::vl_kdforest_build. Note that for efficiency KD-tree does not copy the data but retains a pointer to it. Therefore the data must exist (and not change) until the KD-tree is deleted. To delete the KD-tree object, use ::vl_kdforest_delete. To find the N nearest neighbors to a query point first instantiate a ::VlKDForestSearcher and then start search using a ::vl_kdforest_query with the searcher object as an argument. To set a maximum number of comparisons per query and calculate approximate nearest neighbors use ::vl_kdforest_set_max_num_comparisons. 
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kdtree-tech Technical details <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> ::VlKDForest implements the best-bin-first kd-tree of @cite{beis97shape}. <b>Construction.</b> Given a set of points @f$ x_1,\dots,x_n \in \mathbb{R}^d @f$, the algorithm recursively partitions the @e d dimensional Euclidean space @f$ \mathbb{R}^d @f$ into (hyper-) rectangles. Partitions are organized into a binary tree with the root corresponding to the whole space @f$ \mathbb{R}^d @f$. The algorithm refines each partition by dividing it into two halves by thresholding along a given dimension. Both the splitting dimension and the threshold are determined as a statistic of the data points contained in the partition. The splitting dimension is the one which has largest sample variance and the splitting threshold is either the sample mean or the median. Leaves are atomic partitions and they contain a list of zero or more data points (typically one). <b>Querying.</b> Querying amounts to finding the N data points closer to a given query point @f$ x_q \in \mathbb{R}^d @f$. This is done by branch-and-bound. A search state is an active partition (initially the root) and it is weighed by the lower bound on the distance of any point in the partition and the query point. Such a lower bound is trivial to compute because partitions are hyper-rectangles. <b>Querying usage.</b> As said before a user has to create an instance ::VlKDForestSearcher using ::vl_kdforest_new_searcher in order to be able to make queries. When a user wants to delete a KD-Tree all the searchers bound to the given KD-Forest are erased automatically. If a user wants to delete some of the searchers before the KD-Tree erase, he could do it using the vl_kdforest_delete_searcher method. 
**/

#include "kdtree.h"
#include "generic.h"
#include "random.h"
#include "mathop.h"
#include <stdlib.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

/* heap over search states, ordered by ascending lower bound on the distance */
#define VL_HEAP_prefix vl_kdforest_search_heap
#define VL_HEAP_type VlKDForestSearchState
#define VL_HEAP_cmp(v,x,y) (v[x].distanceLowerBound - v[y].distanceLowerBound)
#include "heap-def.h"

/* min-heap over candidate split dimensions, keyed by variance (the root is
   the least varying of the kept dimensions and is evicted first) */
#define VL_HEAP_prefix vl_kdtree_split_heap
#define VL_HEAP_type VlKDTreeSplitDimension
#define VL_HEAP_cmp(v,x,y) (v[x].variance - v[y].variance)
#include "heap-def.h"

/* max-heap over neighbors, keyed by distance (note the swapped operands) */
#define VL_HEAP_prefix vl_kdforest_neighbor_heap
#define VL_HEAP_type VlKDForestNeighbor
#define VL_HEAP_cmp(v,x,y) (v[y].distance - v[x].distance)
#include "heap-def.h"

/** ------------------------------------------------------------------
 ** @internal
 ** @brief Allocate a new node from the tree pool
 **
 ** Nodes are taken sequentially from the preallocated pool
 ** @c tree->nodes; no memory is allocated here.
 **/

static vl_uindex
vl_kdtree_node_new (VlKDTree * tree, vl_uindex parentIndex)
{
  VlKDTreeNode * node = NULL ;
  vl_uindex nodeIndex = tree->numUsedNodes ;
  tree -> numUsedNodes += 1 ;

  assert (tree->numUsedNodes <= tree->numAllocatedNodes) ;

  node = tree->nodes + nodeIndex ;
  node -> parent = parentIndex ;
  node -> lowerChild = 0 ;
  node -> upperChild = 0 ;
  node -> splitDimension = 0 ;
  node -> splitThreshold = 0 ;
  return nodeIndex ;
}

/** ------------------------------------------------------------------
 ** @internal
 ** @brief Compare KDTree index entries for sorting
 **
 ** qsort comparator: ascending order of the cached @c value field.
 **/

VL_INLINE int
vl_kdtree_compare_index_entries (void const * a,
                                 void const * b)
{
  double delta =
    ((VlKDTreeDataIndexEntry const*)a) -> value -
    ((VlKDTreeDataIndexEntry const*)b) -> value ;
  if (delta < 0) return -1 ;
  if (delta > 0) return +1 ;
  return 0 ;
}

/** ------------------------------------------------------------------
 ** @internal
 ** @brief Build KDTree recursively
 ** @param forest forest to which the tree belongs.
 ** @param tree tree being built.
 ** @param nodeIndex node to process.
 ** @param dataBegin begin of data for this node.
 ** @param dataEnd end of data for this node.
** @param depth depth of this node.
 **/

static void
vl_kdtree_build_recursively
(VlKDForest * forest,
 VlKDTree * tree, vl_uindex nodeIndex,
 vl_uindex dataBegin, vl_uindex dataEnd,
 unsigned int depth)
{
  vl_uindex d, i, medianIndex, splitIndex ;
  VlKDTreeNode * node = tree->nodes + nodeIndex ;
  VlKDTreeSplitDimension * splitDimension ;

  /* base case: there is only one data point.
     Leaves encode the data range [dataBegin, dataEnd) as negative child
     indices: child = -index - 1. */
  if (dataEnd - dataBegin <= 1) {
    if (tree->depth < depth) tree->depth = depth ;
    node->lowerChild = - dataBegin - 1;
    node->upperChild = - dataEnd - 1 ;
    return ;
  }

  /* compute the dimension with largest variance > 0 */
  forest->splitHeapNumNodes = 0 ;
  for (d = 0 ; d < forest->dimension ; ++ d) {
    double mean = 0 ; /* unnormalized */
    double secondMoment = 0 ;
    double variance = 0 ;
    vl_size numSamples = VL_KDTREE_VARIANCE_EST_NUM_SAMPLES;
    vl_bool useAllData = VL_FALSE;

    if(dataEnd - dataBegin <= VL_KDTREE_VARIANCE_EST_NUM_SAMPLES) {
      useAllData = VL_TRUE;
      numSamples = dataEnd - dataBegin;
    }

    for (i = 0; i < numSamples ; ++ i) {
      vl_uint32 sampleIndex;
      vl_index di;
      double datum ;

      if(useAllData == VL_TRUE) {
        sampleIndex = (vl_uint32)i;
      } else {
        /* NOTE(review): the modulus is VL_KDTREE_VARIANCE_EST_NUM_SAMPLES,
           so random samples are drawn only from the first
           VL_KDTREE_VARIANCE_EST_NUM_SAMPLES entries of the range rather
           than uniformly over [dataBegin, dataEnd) — confirm intended. */
        sampleIndex = (vl_rand_uint32(forest->rand) % VL_KDTREE_VARIANCE_EST_NUM_SAMPLES);
      }
      sampleIndex += dataBegin;

      di = tree->dataIndex[sampleIndex].index ;

      switch(forest->dataType) {
        case VL_TYPE_FLOAT: datum = ((float const*)forest->data)
          [di * forest->dimension + d] ;
          break ;
        case VL_TYPE_DOUBLE: datum = ((double const*)forest->data)
          [di * forest->dimension + d] ;
          break ;
        default:
          abort() ;
      }
      mean += datum ;
      secondMoment += datum * datum ;
    }

    mean /= numSamples ;
    secondMoment /= numSamples ;
    variance = secondMoment - mean * mean ;

    if (variance <= 0) continue ;

    /* keep splitHeapSize most varying dimensions */
    if (forest->splitHeapNumNodes < forest->splitHeapSize) {
      VlKDTreeSplitDimension * splitDimension
        = forest->splitHeapArray + forest->splitHeapNumNodes ;
      splitDimension->dimension = (unsigned int)d ;
      splitDimension->mean = mean ;
      splitDimension->variance = variance ;
      vl_kdtree_split_heap_push (forest->splitHeapArray, &forest->splitHeapNumNodes) ;
    } else {
      /* heap root holds the least varying kept dimension; replace it if
         this dimension varies more */
      VlKDTreeSplitDimension * splitDimension = forest->splitHeapArray + 0 ;
      if (splitDimension->variance < variance) {
        splitDimension->dimension = (unsigned int)d ;
        splitDimension->mean = mean ;
        splitDimension->variance = variance ;
        vl_kdtree_split_heap_update (forest->splitHeapArray, forest->splitHeapNumNodes, 0) ;
      }
    }
  }

  /* additional base case: the maximum variance is equal to 0 (overlapping
     points) — store the whole range as a leaf */
  if (forest->splitHeapNumNodes == 0) {
    node->lowerChild = - dataBegin - 1 ;
    node->upperChild = - dataEnd - 1 ;
    return ;
  }

  /* toss a dice to decide the splitting dimension (variance > 0);
     this randomization is what differentiates the trees of the forest */
  splitDimension = forest->splitHeapArray
    + (vl_rand_uint32(forest->rand) % VL_MIN(forest->splitHeapSize, forest->splitHeapNumNodes)) ;

  node->splitDimension = splitDimension->dimension ;

  /* sort data along largest variance dimension */
  for (i = dataBegin ; i < dataEnd ; ++ i) {
    vl_index di = tree->dataIndex[i].index ;
    double datum ;
    switch (forest->dataType) {
      case VL_TYPE_FLOAT: datum = ((float const*)forest->data)
        [di * forest->dimension + splitDimension->dimension] ;
        break ;
      case VL_TYPE_DOUBLE: datum = ((double const*)forest->data)
        [di * forest->dimension + splitDimension->dimension] ;
        break ;
      default:
        abort() ;
    }
    tree->dataIndex [i] .value = datum ;
  }
  qsort (tree->dataIndex + dataBegin,
         dataEnd - dataBegin,
         sizeof (VlKDTreeDataIndexEntry),
         vl_kdtree_compare_index_entries) ;

  /* determine split threshold */
  switch (forest->thresholdingMethod) {
    case VL_KDTREE_MEAN :
      node->splitThreshold = splitDimension->mean ;
      for (splitIndex = dataBegin ;
           splitIndex < dataEnd && tree->dataIndex[splitIndex].value <= node->splitThreshold ;
           ++ splitIndex) ;
      splitIndex -= 1 ;
      /* If the mean does not provide a proper partition, fall back to
       * median. This usually happens if all points have the same
       * value and the zero variance test fails for numerical accuracy
       * reasons. In this case, also due to numerical accuracy, the
       * mean value can be smaller, equal, or larger than all
       * points. */
      if (dataBegin <= splitIndex && splitIndex + 1 < dataEnd) break ;
      /* intentional fallthrough to the median case */

    case VL_KDTREE_MEDIAN :
      medianIndex = (dataBegin + dataEnd - 1) / 2 ;
      splitIndex = medianIndex ;
      node -> splitThreshold = tree->dataIndex[medianIndex].value ;
      break ;

    default:
      abort() ;
  }

  /* divide subparts: lower child gets [dataBegin, splitIndex],
     upper child gets [splitIndex + 1, dataEnd) */
  node->lowerChild = vl_kdtree_node_new (tree, nodeIndex) ;
  vl_kdtree_build_recursively (forest, tree, node->lowerChild, dataBegin, splitIndex + 1, depth + 1) ;
  node->upperChild = vl_kdtree_node_new (tree, nodeIndex) ;
  vl_kdtree_build_recursively (forest, tree, node->upperChild, splitIndex + 1, dataEnd, depth + 1) ;
}

/** ------------------------------------------------------------------
 ** @brief Create new KDForest object
 ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
 ** @param dimension data dimensionality.
 ** @param numTrees number of trees in the forest.
 ** @param distance type of distance norm (::VlDistanceL1 or ::VlDistanceL2).
 ** @return new KDForest.
 **
 ** The data dimension @a dimension and the number of trees @a
 ** numTrees must not be smaller than one.
**/

VlKDForest *
vl_kdforest_new (vl_type dataType,
                 vl_size dimension, vl_size numTrees,
                 VlVectorComparisonType distance)
{
  VlKDForest * forest = vl_calloc (sizeof(VlKDForest), 1) ;

  assert(dataType == VL_TYPE_FLOAT || dataType == VL_TYPE_DOUBLE) ;
  assert(dimension >= 1) ;
  assert(numTrees >= 1) ;

  /* core configuration */
  forest -> rand = vl_get_rand () ;
  forest -> dataType = dataType ;
  forest -> dimension = dimension ;
  forest -> numTrees = numTrees ;
  forest -> distance = distance;

  /* no data attached yet: trees are grown later by vl_kdforest_build */
  forest -> numData = 0 ;
  forest -> data = 0 ;
  forest -> trees = 0 ;
  forest -> maxNumNodes = 0 ;

  /* default build/search parameters */
  forest -> thresholdingMethod = VL_KDTREE_MEDIAN ;
  forest -> splitHeapSize = VL_MIN(numTrees, VL_KDTREE_SPLIT_HEAP_SIZE) ;
  forest -> splitHeapNumNodes = 0 ;
  forest -> numSearchers = 0 ;
  forest -> headSearcher = 0 ;

  /* bind the distance kernel matching the scalar type */
  if (dataType == VL_TYPE_FLOAT) {
    forest -> distanceFunction = (void(*)(void))
      vl_get_vector_comparison_function_f (distance) ;
  } else if (dataType == VL_TYPE_DOUBLE) {
    forest -> distanceFunction = (void(*)(void))
      vl_get_vector_comparison_function_d (distance) ;
  } else {
    abort() ;
  }

  return forest ;
}

/** ------------------------------------------------------------------
 ** @brief Create a KDForest searcher object, used for processing queries
 ** @param kdforest a forest to which the queries should be pointing.
 ** @return KDForest searcher object.
 **
 ** A searcher is an object attached to the forest which must be created
 ** before running the queries. Each query has to be invoked with the
 ** searcher as its argument.
 **
 ** When using a multi-threaded approach a user should at first instantiate
 ** a correct number of searchers - each used in one thread.
 ** Then in each thread a query to the given searcher could be run.
** **/ VlKDForestSearcher * vl_kdforest_new_searcher (VlKDForest * kdforest) { VlKDForestSearcher * self = vl_calloc(sizeof(VlKDForestSearcher), 1); if(kdforest->numSearchers == 0) { kdforest->headSearcher = self; self->previous = NULL; self->next = NULL; } else { VlKDForestSearcher * lastSearcher = kdforest->headSearcher; while (1) { if(lastSearcher->next) { lastSearcher = lastSearcher->next; } else { lastSearcher->next = self; self->previous = lastSearcher; self->next = NULL; break; } } } kdforest->numSearchers++; self->forest = kdforest; self->searchHeapArray = vl_malloc (sizeof(VlKDForestSearchState) * kdforest->maxNumNodes) ; self->searchIdBook = vl_calloc (sizeof(vl_uindex), kdforest->numData) ; return self ; } /** ------------------------------------------------------------------ ** @brief Delete object ** @param self object. **/ void vl_kdforestsearcher_delete (VlKDForestSearcher * self) { if (self->previous && self->next) { self->previous->next = self->next; self->next->previous = self->previous; } else if (self->previous && !self->next) { self->previous->next = NULL; } else if (!self->previous && self->next) { self->next->previous = NULL; self->forest->headSearcher = self->next; } else { self->forest->headSearcher = NULL; } self->forest->numSearchers -- ; vl_free(self->searchHeapArray) ; vl_free(self->searchIdBook) ; vl_free(self) ; } VlKDForestSearcher * vl_kdforest_get_searcher (VlKDForest const * self, vl_uindex pos) { VlKDForestSearcher * lastSearcher = self->headSearcher ; vl_uindex i ; for(i = 0; (i < pos) & (lastSearcher != NULL) ; ++i) { lastSearcher = lastSearcher->next ; } return lastSearcher ; } /** ------------------------------------------------------------------ ** @brief Delete KDForest object ** @param self KDForest object to delete ** @sa ::vl_kdforest_new **/ void vl_kdforest_delete (VlKDForest * self) { vl_uindex ti ; VlKDForestSearcher * searcher ; while ((searcher = vl_kdforest_get_searcher(self, 0))) { 
vl_kdforestsearcher_delete(searcher) ; } if (self->trees) { for (ti = 0 ; ti < self->numTrees ; ++ ti) { if (self->trees[ti]) { if (self->trees[ti]->nodes) vl_free (self->trees[ti]->nodes) ; if (self->trees[ti]->dataIndex) vl_free (self->trees[ti]->dataIndex) ; vl_free (self->trees[ti]) ; } } vl_free (self->trees) ; } vl_free (self) ; } /** ------------------------------------------------------------------ ** @internal @brief Compute tree bounds recursively ** @param tree KDTree object instance. ** @param nodeIndex node index to start from. ** @param searchBounds 2 x numDimension array of bounds. **/ static void vl_kdtree_calc_bounds_recursively (VlKDTree * tree, vl_uindex nodeIndex, double * searchBounds) { VlKDTreeNode * node = tree->nodes + nodeIndex ; vl_uindex i = node->splitDimension ; double t = node->splitThreshold ; node->lowerBound = searchBounds [2 * i + 0] ; node->upperBound = searchBounds [2 * i + 1] ; //VL_PRINT("%f %f\n",node->lowerBound,node->upperBound); if (node->lowerChild > 0) { searchBounds [2 * i + 1] = t ; vl_kdtree_calc_bounds_recursively (tree, node->lowerChild, searchBounds) ; searchBounds [2 * i + 1] = node->upperBound ; } if (node->upperChild > 0) { searchBounds [2 * i + 0] = t ; vl_kdtree_calc_bounds_recursively (tree, node->upperChild, searchBounds) ; searchBounds [2 * i + 0] = node->lowerBound ; } } /** ------------------------------------------------------------------ ** @brief Build KDTree from data ** @param self KDTree object ** @param numData number of data points. ** @param data pointer to the data. ** ** The function builds the KDTree by processing the data @a data. For ** efficiency, KDTree does not make a copy the data, but retains a ** pointer to it. Therefore the data buffer must be valid and ** unchanged for the lifespan of the object. ** ** The number of data points @c numData must not be smaller than one. 
**/

void
vl_kdforest_build (VlKDForest * self, vl_size numData, void const * data)
{
  vl_uindex di, ti ;
  vl_size maxNumNodes ;
  double * searchBounds;

  assert(data) ;
  assert(numData >= 1) ;

  /* need to check: if already built, clean first */
  self->data = data ;
  self->numData = numData ;
  self->trees = vl_malloc (sizeof(VlKDTree*) * self->numTrees) ;
  maxNumNodes = 0 ;

  for (ti = 0 ; ti < self->numTrees ; ++ ti) {
    self->trees[ti] = vl_malloc (sizeof(VlKDTree)) ;
    self->trees[ti]->dataIndex = vl_malloc (sizeof(VlKDTreeDataIndexEntry) * self->numData) ;
    /* start from the identity permutation: entry di refers to datum di */
    for (di = 0 ; di < self->numData ; ++ di) {
      self->trees[ti]->dataIndex[di].index = di ;
    }
    self->trees[ti]->numUsedNodes = 0 ;
    /* num. nodes of a complete binary tree with numData leaves */
    self->trees[ti]->numAllocatedNodes = 2 * self->numData - 1 ;
    self->trees[ti]->nodes = vl_malloc (sizeof(VlKDTreeNode) * self->trees[ti]->numAllocatedNodes) ;
    self->trees[ti]->depth = 0 ;
    vl_kdtree_build_recursively (self, self->trees[ti],
                                 vl_kdtree_node_new(self->trees[ti], 0), 0,
                                 self->numData, 0) ;
    maxNumNodes += self->trees[ti]->numUsedNodes ;
  }

  /* compute per-node bounds for each tree, starting from (-inf, +inf)
   * in every dimension */
  searchBounds = vl_malloc(sizeof(double) * 2 * self->dimension);
  for (ti = 0 ; ti < self->numTrees ; ++ ti) {
    double * iter = searchBounds ;
    double * end = iter + 2 * self->dimension ;
    while (iter < end) {
      *iter++ = - VL_INFINITY_F ;
      *iter++ = + VL_INFINITY_F ;
    }
    vl_kdtree_calc_bounds_recursively (self->trees[ti], 0, searchBounds) ;
  }
  vl_free(searchBounds);
  self -> maxNumNodes = maxNumNodes;
}

/** ------------------------------------------------------------------
 ** @internal @brief Recursively descend a tree during a query
 ** (branch-and-bound): visits the child on the query's side of the
 ** split and defers the other child onto the search heap.
 **/

vl_uindex
vl_kdforest_query_recursively (VlKDForestSearcher * searcher,
                               VlKDTree * tree,
                               vl_uindex nodeIndex,
                               VlKDForestNeighbor * neighbors,
                               vl_size numNeighbors,
                               vl_size * numAddedNeighbors,
                               double dist,
                               void const * query)
{
  VlKDTreeNode const * node = tree->nodes + nodeIndex ;
  vl_uindex i = node->splitDimension ;
  vl_index nextChild, saveChild ;
  double delta, saveDist ;
  double x ;
  double x1 = node->lowerBound ;
  double x2 = node->splitThreshold ;
  double x3 = node->upperBound ;
  VlKDForestSearchState * searchState ;

  searcher->searchNumRecursions ++ ;

  /* query coordinate along this node's split dimension */
  switch (searcher->forest->dataType) {
    case VL_TYPE_FLOAT :
      x = ((float const*) query)[i] ;
      break ;
    case VL_TYPE_DOUBLE :
      x = ((double const*) query)[i] ;
      break ;
    default :
      abort() ;
  }

  /* base case: this is a leaf node */
  if (node->lowerChild < 0) {
    /* leaves store the (negated, offset) data index range */
    vl_index begin = - node->lowerChild - 1 ;
    vl_index end = - node->upperChild - 1 ;
    vl_index iter ;

    for (iter = begin ;
         iter < end &&
         (searcher->forest->searchMaxNumComparisons == 0 ||
          searcher->searchNumComparisons < searcher->forest->searchMaxNumComparisons) ;
         ++ iter) {

      vl_index di = tree->dataIndex [iter].index ;

      /* multiple KDTrees share the database points and we must avoid
       * adding the same point twice */
      if (searcher->searchIdBook[di] == searcher->searchId) continue ;
      searcher->searchIdBook[di] = searcher->searchId ;

      /* compare the query to this point */
      switch (searcher->forest->dataType) {
        case VL_TYPE_FLOAT:
          dist = ((VlFloatVectorComparisonFunction)searcher->forest->distanceFunction)
            (searcher->forest->dimension,
             ((float const *)query),
             ((float const*)searcher->forest->data) + di * searcher->forest->dimension) ;
          break ;
        case VL_TYPE_DOUBLE:
          dist = ((VlDoubleVectorComparisonFunction)searcher->forest->distanceFunction)
            (searcher->forest->dimension,
             ((double const *)query),
             ((double const*)searcher->forest->data) + di * searcher->forest->dimension) ;
          break ;
        default:
          abort() ;
      }
      searcher->searchNumComparisons += 1 ;

      /* see if it should be added to the result set */
      if (*numAddedNeighbors < numNeighbors) {
        VlKDForestNeighbor * newNeighbor = neighbors + *numAddedNeighbors ;
        newNeighbor->index = di ;
        newNeighbor->distance = dist ;
        vl_kdforest_neighbor_heap_push (neighbors, numAddedNeighbors) ;
      } else {
        /* neighbors[0] is the current worst neighbor (max-heap root) */
        VlKDForestNeighbor * largestNeighbor = neighbors + 0 ;
        if (largestNeighbor->distance > dist) {
          largestNeighbor->index = di ;
          largestNeighbor->distance = dist ;
          vl_kdforest_neighbor_heap_update (neighbors, *numAddedNeighbors, 0) ;
        }
      }
    } /* next data point */

    return nodeIndex ;
  }

#if 0
  assert (x1 <= x2 && x2 <= x3) ;
  assert (node->lowerChild >= 0) ;
  assert (node->upperChild >= 0) ;
#endif

  /*
   *   x1  x2 x3
   * x (---|---]
   *   (--x|---]
   *   (---|x--]
   *   (---|---] x
   */

  /* lower bound on the distance of the deferred child's region */
  delta = x - x2 ;
  saveDist = dist + delta*delta ;

  if (x <= x2) {
    nextChild = node->lowerChild ;
    saveChild = node->upperChild ;
    if (x <= x1) {
      delta = x - x1 ;
      saveDist -= delta*delta ;
    }
  } else {
    nextChild = node->upperChild ;
    saveChild = node->lowerChild ;
    if (x > x3) {
      delta = x - x3 ;
      saveDist -= delta*delta ;
    }
  }

  /* enqueue the other child unless it provably cannot improve the result */
  if (*numAddedNeighbors < numNeighbors || neighbors[0].distance > saveDist) {
    searchState = searcher->searchHeapArray + searcher->searchHeapNumNodes ;
    searchState->tree = tree ;
    searchState->nodeIndex = saveChild ;
    searchState->distanceLowerBound = saveDist ;
    vl_kdforest_search_heap_push (searcher->searchHeapArray ,
                                  &searcher->searchHeapNumNodes) ;
  }

  return vl_kdforest_query_recursively (searcher,
                                        tree,
                                        nextChild,
                                        neighbors,
                                        numNeighbors,
                                        numAddedNeighbors,
                                        dist,
                                        query) ;
}

/** ------------------------------------------------------------------
 ** @brief Query the forest
 ** @param self object.
 ** @param neighbors list of nearest neighbors found (output).
 ** @param numNeighbors number of nearest neighbors to find.
 ** @param query query point.
 ** @return number of tree leaves visited.
 **
 ** A neighbor is represented by an instance of the structure
 ** ::VlKDForestNeighbor. Each entry contains the index of the
 ** neighbor (this is an index into the KDTree data) and its distance
 ** to the query point. Neighbors are sorted by increasing distance.
**/

vl_size
vl_kdforest_query (VlKDForest * self,
                   VlKDForestNeighbor * neighbors,
                   vl_size numNeighbors,
                   void const * query)
{
  /* lazily create a default searcher the first time the forest is
   * queried through this convenience entry point */
  VlKDForestSearcher * searcher = vl_kdforest_get_searcher(self, 0) ;
  if (searcher == NULL) {
    searcher = vl_kdforest_new_searcher(self) ;
  }
  return vl_kdforestsearcher_query(searcher, neighbors, numNeighbors, query) ;
}

/** ------------------------------------------------------------------
 ** @brief Query the forest
 ** @param self object.
 ** @param neighbors list of nearest neighbors found (output).
 ** @param numNeighbors number of nearest neighbors to find.
 ** @param query query point.
 ** @return number of tree leaves visited.
 **
 ** A neighbor is represented by an instance of the structure
 ** ::VlKDForestNeighbor. Each entry contains the index of the
 ** neighbor (this is an index into the KDTree data) and its distance
 ** to the query point. Neighbors are sorted by increasing distance.
 **/

vl_size
vl_kdforestsearcher_query (VlKDForestSearcher * self,
                           VlKDForestNeighbor * neighbors,
                           vl_size numNeighbors,
                           void const * query)
{
  vl_uindex i, ti ;
  vl_bool exactSearch = self->forest->searchMaxNumComparisons == 0 ;
  VlKDForestSearchState * searchState ;
  vl_size numAddedNeighbors = 0 ;

  assert (neighbors) ;
  assert (numNeighbors > 0) ;
  assert (query) ;

  /* this number is used to differentiate a query from the next */
  self -> searchId += 1 ;
  self -> searchNumRecursions = 0 ;

  self->searchNumComparisons = 0 ;
  self->searchNumSimplifications = 0 ;

  /* put the root node into the search heap */
  self->searchHeapNumNodes = 0 ;
  for (ti = 0 ; ti < self->forest->numTrees ; ++ ti) {
    searchState = self->searchHeapArray + self->searchHeapNumNodes ;
    searchState -> tree = self->forest->trees[ti] ;
    searchState -> nodeIndex = 0 ;
    searchState -> distanceLowerBound = 0 ;
    vl_kdforest_search_heap_push (self->searchHeapArray, &self->searchHeapNumNodes) ;
  }

  /* branch and bound */
  while (exactSearch || self->searchNumComparisons < self->forest->searchMaxNumComparisons)
  {
    /* pop the next optimal search node */
    VlKDForestSearchState * searchState ;

    /* break if search space completed */
    if (self->searchHeapNumNodes == 0) {
      break ;
    }
    searchState = self->searchHeapArray +
      vl_kdforest_search_heap_pop (self->searchHeapArray, &self->searchHeapNumNodes) ;
    /* break if no better solution may exist: the best remaining lower
     * bound already exceeds the current worst accepted neighbor */
    if (numAddedNeighbors == numNeighbors &&
        neighbors[0].distance < searchState->distanceLowerBound) {
      self->searchNumSimplifications ++ ;
      break ;
    }
    vl_kdforest_query_recursively (self,
                                   searchState->tree,
                                   searchState->nodeIndex,
                                   neighbors,
                                   numNeighbors,
                                   &numAddedNeighbors,
                                   searchState->distanceLowerBound,
                                   query) ;
  }

  /* sort neighbors by increasing distance */
  for (i = numAddedNeighbors ; i < numNeighbors ; ++ i) {
    /* pad unfilled slots when fewer than numNeighbors points were found */
    neighbors[i].index = -1 ;
    neighbors[i].distance = VL_NAN_F ;
  }
  /* heap-sort: repeatedly popping the max-heap sorts the array in place */
  while (numAddedNeighbors) {
    vl_kdforest_neighbor_heap_pop (neighbors, &numAddedNeighbors) ;
  }
  return self->searchNumComparisons ;
}

/** ------------------------------------------------------------------
 ** @brief Run multiple queries
 ** @param self object.
 ** @param indexes assignments of points.
 ** @param numNeighbors number of nearest neighbors to be found for each data point
 ** @param numQueries number of query points.
 ** @param distances distances of query points.
 ** @param queries list of vectors to use as queries.
 **
 ** @a indexes and @a distances are @a numNeighbors by @a numQueries
 ** matrices containing the indexes and distances of the nearest neighbours
 ** for each of the @a numQueries queries @a queries.
 **
 ** This function is similar to ::vl_kdforest_query. The main
 ** difference is that the function can use multiple cores to query
 ** large amounts of data.
 **
 ** @sa ::vl_kdforest_query.
**/

vl_size
vl_kdforest_query_with_array (VlKDForest * self,
                              vl_uint32 * indexes,
                              vl_size numNeighbors,
                              vl_size numQueries,
                              void * distances,
                              void const * queries)
{
  vl_size numComparisons = 0;
  vl_type dataType = vl_kdforest_get_data_type(self) ;
  vl_size dimension = vl_kdforest_get_data_dimension(self) ;

#ifdef _OPENMP
#pragma omp parallel default(shared) num_threads(vl_get_max_threads())
#endif
  {
    vl_index qi ;
    vl_size thisNumComparisons = 0 ;
    VlKDForestSearcher * searcher ;
    VlKDForestNeighbor * neighbors ;

    /* creating a searcher mutates the forest's searcher list, so this
     * per-thread setup must be serialized */
#ifdef _OPENMP
#pragma omp critical
#endif
    {
      searcher = vl_kdforest_new_searcher(self) ;
      neighbors = vl_calloc (sizeof(VlKDForestNeighbor), numNeighbors) ;
    }

    /* queries are independent and are distributed across threads */
#ifdef _OPENMP
#pragma omp for
#endif
    for(qi = 0 ; qi < (signed)numQueries; ++ qi) {
      switch (dataType) {
        case VL_TYPE_FLOAT: {
          vl_size ni;
          thisNumComparisons += vl_kdforestsearcher_query (searcher, neighbors, numNeighbors,
                                                           (float const *) (queries) + qi * dimension) ;
          for (ni = 0 ; ni < numNeighbors ; ++ni) {
            indexes [qi*numNeighbors + ni] = (vl_uint32) neighbors[ni].index ;
            /* distances output is optional */
            if (distances){
              *((float*)distances + qi*numNeighbors + ni) = neighbors[ni].distance ;
            }
          }
          break ;
        }
        case VL_TYPE_DOUBLE: {
          vl_size ni;
          thisNumComparisons += vl_kdforestsearcher_query (searcher, neighbors, numNeighbors,
                                                           (double const *) (queries) + qi * dimension) ;
          for (ni = 0 ; ni < numNeighbors ; ++ni) {
            indexes [qi*numNeighbors + ni] = (vl_uint32) neighbors[ni].index ;
            if (distances){
              *((double*)distances + qi*numNeighbors + ni) = neighbors[ni].distance ;
            }
          }
          break ;
        }
        default:
          abort() ;
      }
    }

    /* accumulate the per-thread comparison count and tear down the
     * per-thread searcher, again serialized */
#ifdef _OPENMP
#pragma omp critical
#endif
    {
      numComparisons += thisNumComparisons ;
      vl_kdforestsearcher_delete (searcher) ;
      vl_free (neighbors) ;
    }
  }
  return numComparisons ;
}

/** ------------------------------------------------------------------
 ** @brief Get the number of nodes of a given tree
 ** @param self KDForest object.
 ** @param treeIndex index of the tree.
 ** @return number of nodes used by the tree.
**/ vl_size vl_kdforest_get_num_nodes_of_tree (VlKDForest const * self, vl_uindex treeIndex) { assert (treeIndex < self->numTrees) ; return self->trees[treeIndex]->numUsedNodes ; } /** ------------------------------------------------------------------ ** @brief Get the detph of a given tree ** @param self KDForest object. ** @param treeIndex index of the tree. ** @return number of trees. **/ vl_size vl_kdforest_get_depth_of_tree (VlKDForest const * self, vl_uindex treeIndex) { assert (treeIndex < self->numTrees) ; return self->trees[treeIndex]->depth ; } /** ------------------------------------------------------------------ ** @brief Get the number of trees in the forest ** ** @param self KDForest object. ** @return number of trees. **/ vl_size vl_kdforest_get_num_trees (VlKDForest const * self) { return self->numTrees ; } /** ------------------------------------------------------------------ ** @brief Set the maximum number of comparisons for a search ** ** @param self KDForest object. ** @param n maximum number of leaves. ** ** This function sets the maximum number of comparisons for a ** nearest neighbor search. Setting it to 0 means unbounded comparisons. ** ** @sa ::vl_kdforest_query, ::vl_kdforest_get_max_num_comparisons. **/ void vl_kdforest_set_max_num_comparisons (VlKDForest * self, vl_size n) { self->searchMaxNumComparisons = n ; } /** ------------------------------------------------------------------ ** @brief Get the maximum number of comparisons for a search ** ** @param self KDForest object. ** @return maximum number of leaves. ** ** @sa ::vl_kdforest_set_max_num_comparisons. **/ vl_size vl_kdforest_get_max_num_comparisons (VlKDForest * self) { return self->searchMaxNumComparisons ; } /** ------------------------------------------------------------------ ** @brief Set the thresholding method ** @param self KDForest object. ** @param method one of ::VlKDTreeThresholdingMethod. 
** ** @sa ::vl_kdforest_get_thresholding_method **/ void vl_kdforest_set_thresholding_method (VlKDForest * self, VlKDTreeThresholdingMethod method) { assert(method == VL_KDTREE_MEDIAN || method == VL_KDTREE_MEAN) ; self->thresholdingMethod = method ; } /** ------------------------------------------------------------------ ** @brief Get the thresholding method ** ** @param self KDForest object. ** @return thresholding method. ** ** @sa ::vl_kdforest_set_thresholding_method **/ VlKDTreeThresholdingMethod vl_kdforest_get_thresholding_method (VlKDForest const * self) { return self->thresholdingMethod ; } /** ------------------------------------------------------------------ ** @brief Get the dimension of the data ** @param self KDForest object. ** @return dimension of the data. **/ vl_size vl_kdforest_get_data_dimension (VlKDForest const * self) { return self->dimension ; } /** ------------------------------------------------------------------ ** @brief Get the data type ** @param self KDForest object. ** @return data type (one of ::VL_TYPE_FLOAT, ::VL_TYPE_DOUBLE). **/ vl_type vl_kdforest_get_data_type (VlKDForest const * self) { return self->dataType ; } /** ------------------------------------------------------------------ ** @brief Get the forest linked to the searcher ** @param self object. ** @return correspoinding KD-Forest. **/ VlKDForest * vl_kdforestsearcher_get_forest (VlKDForestSearcher const * self) { return self->forest; }
flexSuperpixelOperator.h
#ifndef flexSuperpixelOperator_H #define flexSuperpixelOperator_H #include <vector> #include "flexLinearOperator.h" //! represents a superpixel operator /*! downsamples data of size upsamplingFactor * targetDimension size to targetDimension */ template<typename T> class flexSuperpixelOperator : public flexLinearOperator<T> { #ifdef __CUDACC__ typedef thrust::device_vector<T> Tdata; #else typedef std::vector<T> Tdata; #endif private: std::vector<int> targetDimension; T upsamplingFactor; public: //! initializes the superpixel operator. Downsamples image of size aUpsamplingFactor * aTargetDimension to size aTargetDimension /*! \param aTargetDimension target dimension of downsampled image \param aUpsamplingFactor aUpsamplingFactor * aTargetDimension is original image size \param aMinus determines if operator is negated \sa isMinus */ flexSuperpixelOperator(std::vector<int> aTargetDimension, T aUpsamplingFactor, bool aMinus) : flexLinearOperator<T>((int)(vectorProduct(aTargetDimension)), (int)(vectorProduct(aTargetDimension)*aUpsamplingFactor*aUpsamplingFactor), superpixelOp, aMinus) { this->targetDimension.resize(aTargetDimension.size()); this->targetDimension = aTargetDimension; this->upsamplingFactor = aUpsamplingFactor; }; flexSuperpixelOperator<T>* copy() { return new flexSuperpixelOperator<T>(this->targetDimension, this->upsamplingFactor, this->isMinus); } //to implement void times(bool transposed, const Tdata &input, Tdata &output) { } void timesPlus(bool transposed, const Tdata &input, Tdata &output) { if (this->isMinus) { doTimes(transposed,input,output, MINUS); } else { doTimes(transposed,input,output, PLUS); } } void timesMinus(bool transposed, const Tdata &input, Tdata &output) { if (this->isMinus) { doTimes(transposed,input,output, PLUS); } else { doTimes(transposed,input,output, MINUS); } } std::vector<T> getAbsRowSum(bool transposed) { if (transposed) { return std::vector<T>(this->getNumCols(), (T)1 / (T)(this->upsamplingFactor*this->upsamplingFactor)); } 
else { return std::vector<T>(this->getNumRows(), (T)1); } } T getMaxRowSumAbs(bool transposed) { if (transposed) { return (T)1 / (T)(this->upsamplingFactor*this->upsamplingFactor); } else { return (T)1; } } #ifdef __CUDACC__ thrust::device_vector<T> getAbsRowSumCUDA(bool transposed) { Tdata result(this->getNumRows(),(T)1); return result; } #endif private: int indexI(int index, int sizeX) { return index % sizeX; } int indexJ(int index, int sizeX, int sizeY) { return (index / sizeX) % sizeY; } int index2DtoLinear(int i, int j, int sizeY) { return (i*sizeY + j); } void calcTimes(const Tdata &input, Tdata &output, mySign signRule) { T factor = (T)1 / (this->upsamplingFactor*this->upsamplingFactor); int iOuterEnd = targetDimension[0]; int jOuterEnd = targetDimension[1]; int sizeY = targetDimension[1] * (int)this->upsamplingFactor; #pragma omp parallel for for (int i = 0; i < iOuterEnd; ++i) { for (int j = 0; j < jOuterEnd; ++j) { //printf("Output: (%d,%d) : %d\n", i, j, index2DtoLinear(i, j, this->targetDimension[1])); int outputIndex = index2DtoLinear(i, j, targetDimension[1]); int iInnerStart = i*(int)this->upsamplingFactor; int iInnerEnd = (i + 1)*(int)this->upsamplingFactor; int jInnerStart = j*(int)this->upsamplingFactor; int jInnerEnd = (j + 1)*(int)this->upsamplingFactor; T tmpResult = (T)0; for (int iInner = iInnerStart; iInner < iInnerEnd; ++iInner) { for (int jInner = jInnerStart; jInner < jInnerEnd; ++jInner) { int inputIndex = index2DtoLinear(iInner, jInner, sizeY); tmpResult += input[inputIndex]; /*printf("Inner: (%d,%d) : %d\n", iInner, jInner, inputIndex); int innerJ = indexI(inputIndex, this->targetDimension[0] * this->upsamplingFactor); int innerI = indexJ(inputIndex, this->targetDimension[0] * this->upsamplingFactor, this->targetDimension[1] * this->upsamplingFactor); printf("Back: (%d,%d) \n", innerI, innerJ); int backI = innerI / this->upsamplingFactor; int backJ = innerJ / this->upsamplingFactor; printf("BackInner: (%d,%d) \n", backI, backJ); if 
(backI != i || backJ != j) { mexErrMsgTxt("PROBLEM!!!\n"); }*/ } } switch (signRule) { case PLUS: { output[outputIndex] += factor*tmpResult; break; } case MINUS: { output[outputIndex] -= factor*tmpResult; break; } case EQUALS: { output[outputIndex] = factor*tmpResult; break; } } } } } void calcTimesTransposed(const Tdata &input, Tdata &output, mySign signRule) { T factor = (T)1 / (this->upsamplingFactor*this->upsamplingFactor); int sizeX = targetDimension[0] * (int)this->upsamplingFactor; int sizeY = targetDimension[1] * (int)this->upsamplingFactor; #pragma omp parallel for for (int i = 0; i < sizeX; ++i) { for (int j = 0; j < sizeY; ++j) { int inputIndex = index2DtoLinear(i, j, sizeY); //int innerJ = indexI(inputIndex, this->targetDimension[0] * this->upsamplingFactor); //int innerI = indexJ(inputIndex, this->targetDimension[0] * this->upsamplingFactor, this->targetDimension[1] * this->upsamplingFactor); //printf("Back: (%d,%d) \n", innerI, innerJ); int backI = i / (int)this->upsamplingFactor; int backJ = j / (int)this->upsamplingFactor; int outputIndex = index2DtoLinear(backI, backJ, targetDimension[1]); //printf("Back: (%d,%d) %d,%d \n", backI, backJ, inputIndex, outputIndex); switch (signRule) { case PLUS: { output[inputIndex] += factor*input[outputIndex]; break; } case MINUS: { output[inputIndex] -= factor*input[outputIndex]; break; } case EQUALS: { output[inputIndex] = factor*input[outputIndex]; break; } } } } } void doTimes(bool transposed, const Tdata &input, Tdata &output, mySign signRule) { if (transposed) { calcTimesTransposed(input, output, signRule); } else { calcTimes(input, output, signRule); } } }; #endif
ast-dump-openmp-parallel.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp parallel ; } // CHECK: TranslationUnitDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl 0x{{.*}} <{{.*}}ast-dump-openmp-parallel.c:3:1, line:6:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt 0x{{.*}} <col:13, line:6:1> // CHECK-NEXT: `-OMPParallelDirective 0x{{.*}} <line:4:1, col:21> // CHECK-NEXT: `-CapturedStmt 0x{{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl 0x{{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-NullStmt 0x{{.*}} <col:3> openmp_structured_block // CHECK-NEXT: |-ImplicitParamDecl 0x{{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl 0x{{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: `-ImplicitParamDecl 0x{{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel.c:4:1) *const restrict'
darts-omp.c
/* Compute pi using OpenMP */
/* Monte Carlo estimate: throw darts at the unit square and count how
 * many land inside the quarter circle of radius r. */
#include "lcgenerator.h"
#include <omp.h>
#include <stdio.h>

static long num_trials = 1000000;

int main(int argc, char **argv) {
  long hits = 0;          /* darts landing inside the circle */
  const double r = 1.0;   /* radius of circle */
  const double r2 = r * r;

  #pragma omp parallel for reduction(+:hits)
  for (long i = 0; i < num_trials; i++) {
    double x, y;
    /* the linear congruential generator keeps shared state, so the
     * two draws must be serialized */
    #pragma omp critical (randoms)
    {
      x = lcgrandom();
      y = lcgrandom();
    }
    if ((x * x + y * y) <= r2) hits++;
  }

  double pi = 4.0 * ((double)hits) / ((double)num_trials);

  printf("\n \t Computing pi using OpenMP: \n");
  printf("\t For %ld trials, pi = %f\n", num_trials, pi);
  printf("\n");
  return 0;
}
omp_zsymm_batch.c
/** * @file omp_zsymm_batch.c * * @brief BBLAS omp_zsymm_batch double _Complex routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @precisions normal z -> c d s **/ #endif #include<cblas.h> #include "bblas_omp.h" #include "bblas.h" #include <omp.h> #define COMPLEX /** Purpose ------- <b>zsymm_batch</b> is an OpenMP version of zsymm_batch. It performs one of the matrix-matrix operations arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i], or arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i], where alpha[i] and beta[i] are scalars, arrayA[i] is a symmetric matrix and arrayB[i] and arrayC[i] are M[i] by N[i] matrices. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of side[0], uplo[0], M[0], N[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations. Parameters ---------- @param[in] side Array of <tt>enum BBLAS_SIDE</tt>. Each element side[i] specifies whether the symmetric matrix arrayA[i] appears on the left or right side of the operation as follows: - = 'BblasLeft' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i] + beta[i]*arrayC[i]. - = 'BblasRight' arrayC[i] = alpha[i]*arrayB[i]*arrayA[i] + beta[i]*arrayC[i]. 
@param[in] uplo Array of <tt>enum BBLAS_UPLO</tt>. On entry, uplo[i] specifies whether the upper or lower triangular part of the symmetric matrix arrayA[i] is to be referenced as follows: - = 'BblasUpper' Only the upper triangular part of arrayA[i] is to be referenced. - = 'BblasLower' Only the lower triangular part of arrayA[i] is to be referenced. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix arrayC[i]. M[i] must be greater than zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix arrayC[i]. N[i] must be greater than zero. @param[in] alpha Array of <tt>complex_16</tt>. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a COMPLEX_16 matrix of dimension lda[i] by Ka[i], where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise. When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i] must contain the symmetric matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the symmetric matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the symmetric matrix whilst the strictly upper triangular part is not used. When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must contain the symmetric matrix: when uplo[i] = BblasUpper, the upper triangular part of arrayA[i] must contain the upper triangular part of the symmetric matrix whilst the strictly lower triangular part is not used; similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i] must contain the lower triangular part of the symmetric matrix whilst the strictly upper triangular part is not used. @param[in] lda Array of <tt>int</tt>. On entry, lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. 
			When side[i] = BblasLeft then lda[i] must be at least max( 1, M[i] ),
			otherwise lda[i] must be at least max( 1, N[i] ).

@param[in] arrayB
			Array of pointers. Each element arrayB[i] is a pointer to a
			COMPLEX_16 matrix of dimension ldb[i] by N[i].
			The leading M[i] by N[i] part of arrayB[i] must contain the matrix
			elements.

@param[in] ldb
			Array of <tt>int</tt>. Each element ldb[i] specifies the first
			dimension of arrayB[i] as declared in the calling (sub) program.
			Each element ldb[i] must be at least max( 1, M[i] ).

@param[in] beta
			Array of <tt>complex_16</tt>. When beta[i] is set to zero
			arrayC[i] need not be set on input.

@param[in,out] arrayC
			Array of pointers. Each element arrayC[i] is a pointer to a
			COMPLEX_16 matrix of dimension ldc[i] by N[i].
			Before entry, the leading M[i] by N[i] part of the arrayC[i] must
			contain a matrix C, except when beta is zero, in which case C need
			not be set on entry.
			On exit, the matrix arrayC[i] is overwritten by the M[i] by N[i]
			matrix output.

@param[in] ldc
			Array of <tt>int</tt>. Each element ldc[i] specifies the first
			dimension of arrayC[i] as declared in the calling (sub) program.
			The value ldc[i] must be at least max( 1, M[i] ).

@param[in] batch_count
			<tt>int</tt>
			The number of matrices to operate on.

@param[in] batch_opts
			<tt>enum BBLAS_OPTS</tt>
			One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
			batch operation required.

@param[out] info
			Array of <tt>int</tt>. Each element info[i] is the error return
			code of the ith zsymm in the batch; these need not be set on
			entry. The error codes can be found in bblas_macros.h.
**/
void omp_zsymm_batch(
    const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo,
    const int *M, const int *N,
    const BBLAS_Complex64_t *alpha, const BBLAS_Complex64_t **arrayA,
    const int *lda, const BBLAS_Complex64_t **arrayB, const int *ldb,
    const BBLAS_Complex64_t *beta, BBLAS_Complex64_t **arrayC,
    const int *ldc, const int batch_count, const enum BBLAS_OPTS batch_opts,
    int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA;
    char func_name[15] = "zsymm_batch";

    /* Check input arguments */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
        /* BUG FIX: the original code fell through here, going on to
         * dereference side[0], uplo[0], etc. despite the invalid count.
         * Bail out immediately instead. */
        return;
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* FIXED batch: only element [0] of each parameter array is
         * validated and used for every matrix in the batch. */
        if ((side[first_index] != BblasLeft) &&
            (side[first_index] != BblasRight))
        {
            xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_SIDE;
            }
            return;
        }
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if (M[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_M, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_M;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        /* A is M-by-M when it appears on the left, N-by-N on the right */
        if (side[first_index] == BblasLeft)
        {
            LDA = M[first_index];
        } else
        {
            LDA = N[first_index];
        }
        if (lda[first_index] < LDA)
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        if (ldc[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* Quick return: empty matrices, or alpha == 0 && beta == 1
         * leaves C unchanged */
        if (M[first_index] == 0 || N[first_index] == 0 ||
            (alpha[first_index] == (BBLAS_Complex64_t)0.0 &&
             beta[first_index] == (BBLAS_Complex64_t)1.0))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
        /* One independent zsymm per batch entry; all share the [0] sizes */
        #pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Call to cblas_zsymm */
            cblas_zsymm(
                BblasColMajor,
                side[first_index], uplo[first_index],
                M[first_index], N[first_index],
                CBLAS_SADDR(alpha[first_index]),
                arrayA[batch_iter], lda[first_index],
                arrayB[batch_iter], ldb[first_index],
                CBLAS_SADDR(beta[first_index]),
                arrayC[batch_iter], ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    } else if (batch_opts == BBLAS_VARIABLE)
    {
        /* VARIABLE batch: every entry carries its own sizes and scalars,
         * so validation happens per iteration; an invalid entry is skipped
         * (continue) rather than aborting the whole batch. */
        #pragma omp parallel for private(batch_iter, LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((side[batch_iter] != BblasLeft) &&
                (side[batch_iter] != BblasRight))
            {
                xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter);
                info[batch_iter] = BBLAS_ERR_SIDE;
                continue;
            }
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if (M[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_M, batch_iter);
                info[batch_iter] = BBLAS_ERR_M;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (side[batch_iter] == BblasLeft)
            {
                LDA = M[batch_iter];
            } else
            {
                LDA = N[batch_iter];
            }
            if (lda[batch_iter] < LDA)
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldb[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            if (ldc[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* Quick return for this entry only */
            if (M[batch_iter] == 0 || N[batch_iter] == 0 ||
                (alpha[batch_iter] == (BBLAS_Complex64_t)0.0 &&
                 beta[batch_iter] == (BBLAS_Complex64_t)1.0))
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_zsymm(
                BblasColMajor,
                side[batch_iter], uplo[batch_iter],
                M[batch_iter], N[batch_iter],
                CBLAS_SADDR(alpha[batch_iter]),
                arrayA[batch_iter], lda[batch_iter],
                arrayB[batch_iter], ldb[batch_iter],
                CBLAS_SADDR(beta[batch_iter]),
                arrayC[batch_iter], ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    } else
    {
        /* batch_opts is neither BBLAS_FIXED nor BBLAS_VARIABLE */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef COMPLEX
GB_unaryop__ainv_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_fp64 // op(A') function: GB_tran__ainv_bool_fp64 // C type: bool // A type: double // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_fp64 ( bool *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
neuron_update.h
#pragma omp declare target
// Convert a time value t to an integer number of timesteps of length dt.
// The 1e-3*dt fudge term guards against floating-point round-off pushing
// t/dt just below an integer boundary.  Compiled for the offload target
// as well as the host (declare target).
inline int _timestep(float t, float dt)
{
    return (int)((t + 1e-3f*dt)/dt);
}
#pragma omp end declare target

// State updater for a Hodgkin-Huxley-style neuron group, offloaded to an
// accelerator with OpenMP target directives.  Looks like Brian2-generated
// code (the _lio_* names are its "linear is optimal" constant-folding) —
// TODO confirm against the generator.  Each _lio_ constant's original
// symbolic expression is preserved in the trailing comment beside it.
// The update scheme is exponential Euler: for each state x with dynamics
// dx/dt = (x_inf - x)/tau written as x' = A + B*x, the closed-form step is
// x(t+dt) = -BA + (BA + x) * exp(dt * B), with BA = A/B.
void neurongroup_stateupdater (
    float* __restrict _ptr_array_neurongroup_ge,             // excitatory conductance, per neuron
    float* __restrict _ptr_array_neurongroup_gi,             // inhibitory conductance, per neuron
    float* __restrict _ptr_array_neurongroup_h,              // Na inactivation gate, per neuron
    float* __restrict _ptr_array_neurongroup_m,              // Na activation gate, per neuron
    float* __restrict _ptr_array_neurongroup_n,              // K activation gate, per neuron
    float* __restrict _ptr_array_neurongroup_v,              // membrane potential, per neuron
    float* __restrict _ptr_array_neurongroup_lastspike,      // time of each neuron's last spike (read-only)
    float* __restrict _ptr_array_defaultclock_dt,            // [0] = timestep
    float*__restrict _ptr_array_defaultclock_t,              // [0] = current time
    char* __restrict _ptr_array_neurongroup_not_refractory,  // output flag per neuron
    const int _N,                                            // number of neurons
    const int iteration                                      // number of timesteps to advance
)
{
    const float dt = _ptr_array_defaultclock_dt[0];
    const float t = _ptr_array_defaultclock_t[0];
    // refractory period (3 ms) expressed in timesteps
    const int _lio_1 = _timestep(0.003, dt);
    // Pre-folded constants; the comment after each one is the original
    // symbolic expression it replaces.  _lio_8 and _lio_18 round tiny
    // values down to 0, and _lio_4/_lio_14 are rounded — presumably an
    // intentional single-precision approximation; verify against the
    // generator if exactness matters.
    const float _lio_2 = 9.939082f; //1.0f*(0.3291372 * exp(1.0f*(0.055555556 * (-0.063f))/0.001f))/0.001f;
    const float _lio_3 = -55.555556f; //1.0f*(-0.055555556)/0.001f;
    const float _lio_4 = 0.00001f; //2980.958 * (0.001f * exp(1.0f*(0.2 * (-0.063f))/0.001f));
    const float _lio_5 = -200.0f; //1.0f*(-0.2)/0.001f;
    const float _lio_6 = -0.02016f; //0.32 * (-0.063f);
    const float _lio_7 = -0.000001f; //- 0.001f * 0.001f;
    const float _lio_8 = 0; //25.79034 * ((0.001f * 0.001f) * exp(1.0f*(0.25 * (-0.063f))/0.001f));
    const float _lio_9 = -250.0f; //1.0f*(-0.25)/0.001f;
    const float _lio_10 = 0.00416f; //4.16 * 0.001f;
    const float _lio_11 = 0.02016f; //(-0.32) * (-0.063f);
    const float _lio_12 = -0.01764f; //0.28 * (-0.063f);
    const float _lio_13 = -0.000001f; //(-1.0) * (0.001f * 0.001f);
    const float _lio_14 = 0.000099f; //0.00033546262 * ((0.001f * 0.001f) * exp(1.0f*((-0.2) * (-0.063f))/0.001f));
    const float _lio_15 = 200.0f; //1.0f*0.2/0.001f;
    const float _lio_16 = 0.0112f; //11.2 * 0.001f;
    const float _lio_17 = -0.002016f; //0.032 * (-0.063f);
    const float _lio_18 = 0; //20.085537 * ((0.001f * 0.001f) * exp(1.0f*(0.2 * (-0.063f))/0.001f));
    const float _lio_19 = 0.00048f; //0.48 * 0.001f;
    const float _lio_20 = 0.002016f; //(-0.032) * (-0.063f);
    const float _lio_21 = 132.901474f; //1.0f*(0.6420127 * exp(1.0f*(0.025 * (-0.063f))/0.001f))/0.001f;
    const float _lio_22 = -25.0f; //1.0f*(-0.025)/0.001f;
    // exact decay factor for ge over one dt (tau_e = 0.5 ms)
    const float _lio_23 = expf(-2000.0f*dt);
    const float _lio_24 = -3.0f; //1.0f*((-0.06f) * 1e-08f)/2e-10f;
    const float _lio_25 = -2700.0f; //1.0f*((-0.09f) * 6e-06f)/2e-10f;
    const float _lio_26 = 5000.0f; //1.0f*(0.05f * 2e-05f)/2e-10f;
    const float _lio_27 = 0; // 1.0f*0.0f/2e-10f;
    const float _lio_28 = -400000000.0f; //1.0f*(-0.08f)/2e-10f;
    const float _lio_29 = -50.0f; //0.0 - (1.0f*1e-08f/2e-10f);
    const float _lio_30 = -30000.0f; //1.0f*(- 6e-06f)/2e-10f;
    const float _lio_31 = 100000.0f; //1.0f*2e-05f/2e-10f;
    const float _lio_32 = 5000000000.0f; //1.0f*1.0/2e-10f;
    // exact decay factor for gi over one dt (tau_i = 10 ms)
    const float _lio_33 = expf(-100.0f*dt);

    // Keep all state arrays resident on the device across every iteration;
    // only one host<->device transfer per array for the whole call.
    #pragma omp target data map(tofrom: _ptr_array_neurongroup_h[0:_N],\
                                        _ptr_array_neurongroup_m[0:_N], \
                                        _ptr_array_neurongroup_n[0:_N], \
                                        _ptr_array_neurongroup_ge[0:_N], \
                                        _ptr_array_neurongroup_v[0:_N], \
                                        _ptr_array_neurongroup_gi[0:_N] ) \
                            map(to: _ptr_array_neurongroup_lastspike[0:_N]) \
                            map(from: _ptr_array_neurongroup_not_refractory[0:_N])
    {
        for (int i = 0; i < iteration; i++)
        {
            // one kernel launch per timestep; neurons are independent
            #pragma omp target teams distribute parallel for thread_limit(256)
            for(int _idx=0; _idx<_N; _idx++)
            {
                // load this neuron's state into registers/locals
                float h = _ptr_array_neurongroup_h[_idx];
                float m = _ptr_array_neurongroup_m[_idx];
                float n = _ptr_array_neurongroup_n[_idx];
                float ge = _ptr_array_neurongroup_ge[_idx];
                float v = _ptr_array_neurongroup_v[_idx];
                const float lastspike = _ptr_array_neurongroup_lastspike[_idx];
                float gi = _ptr_array_neurongroup_gi[_idx];
                char not_refractory;
                // neuron is out of its refractory period once 3 ms of
                // timesteps have elapsed since its last spike
                not_refractory = _timestep(t - lastspike, dt) >= _lio_1;
                // h gate: exponential Euler (BA = A/B, then closed-form step)
                const float _BA_h = (_lio_2 * expf(_lio_3 * v))/(((-4.0f)/(0.001f + (_lio_4 * expf(_lio_5 * v)))) - (_lio_2 * expf(_lio_3 * v)));
                const float _h = (- _BA_h) + ((_BA_h + h) * expf(dt * (((-4.0f)/(0.001f + (_lio_4 * expf(_lio_5 * v)))) - (_lio_2 * expf(_lio_3 * v)))));
                // m gate: same scheme with the alpha_m/beta_m rate terms
                const float _BA_m = (((_lio_6/(_lio_7 + (_lio_8 * expf(_lio_9 * v)))) + (_lio_10/(_lio_7 + (_lio_8 * expf(_lio_9 * v))))) - ((0.32f * v)/(_lio_7 + (_lio_8 * expf(_lio_9 * v)))))/(((((_lio_11/(_lio_7 + (_lio_8 * expf(_lio_9 * v)))) + (_lio_12/(_lio_13 + (_lio_14 * expf(_lio_15 * v))))) + (_lio_16/(_lio_13 + (_lio_14 * expf(_lio_15 * v))))) + ((0.32f * v)/(_lio_7 + (_lio_8 * expf(_lio_9 * v))))) - ((_lio_10/(_lio_7 + (_lio_8 * expf(_lio_9 * v)))) + ((0.28f * v)/(_lio_13 + (_lio_14 * expf(_lio_15 * v))))));
                const float _m = (- _BA_m) + ((_BA_m + m) * expf(dt * (((((_lio_11/(_lio_7 + (_lio_8 * expf(_lio_9 * v)))) + (_lio_12/(_lio_13 + (_lio_14 * expf(_lio_15 * v))))) + (_lio_16/(_lio_13 + (_lio_14 * expf(_lio_15 * v))))) + ((0.32f * v)/(_lio_7 + (_lio_8 * expf(_lio_9 * v))))) - ((_lio_10/(_lio_7 + (_lio_8 * expf(_lio_9 * v)))) + ((0.28f * v)/(_lio_13 + (_lio_14 * expf(_lio_15 * v)))))))); 
                // n gate: same scheme with the alpha_n/beta_n rate terms
                const float _BA_n = (((_lio_17/(_lio_7 + (_lio_18 * expf(_lio_5 * v)))) + (_lio_19/(_lio_7 + (_lio_18 * expf(_lio_5 * v))))) - ((0.032f * v)/(_lio_7 + (_lio_18 * expf(_lio_5 * v)))))/(((_lio_20/(_lio_7 + (_lio_18 * expf(_lio_5 * v)))) + ((0.032f * v)/(_lio_7 + (_lio_18 * expf(_lio_5 * v))))) - ((_lio_19/(_lio_7 + (_lio_18 * expf(_lio_5 * v)))) + (_lio_21 * expf(_lio_22 * v))));
                const float _n = (- _BA_n) + ((_BA_n + n) * expf(dt * (((_lio_20/(_lio_7 + (_lio_18 * expf(_lio_5 * v)))) + ((0.032f * v)/(_lio_7 + (_lio_18 * expf(_lio_5 * v))))) - ((_lio_19/(_lio_7 + (_lio_18 * expf(_lio_5 * v)))) + (_lio_21 * expf(_lio_22 * v))))));
                // synaptic conductances decay exponentially
                const float _ge = _lio_23 * ge;
                // membrane potential: exponential Euler over the total
                // (leak + K + Na + synaptic) current, gates held fixed
                const float _BA_v = (_lio_24 + ((((_lio_25 * (n*n*n*n)) + (_lio_26 * (h * (m*m*m)))) + (_lio_27 * ge)) + (_lio_28 * gi)))/((_lio_29 + (_lio_30 * (n*n*n*n))) - (((_lio_31 * (h * (m*m*m))) + (_lio_32 * ge)) + (_lio_32 * gi)));
                const float _v = (- _BA_v) + ((_BA_v + v) * expf(dt * ((_lio_29 + (_lio_30 * (n*n*n*n))) - (((_lio_31 * (h * (m*m*m))) + (_lio_32 * ge)) + (_lio_32 * gi)))));
                const float _gi = _lio_33 * gi;
                // write the updated state back to the device arrays
                _ptr_array_neurongroup_h[_idx] = _h;
                _ptr_array_neurongroup_m[_idx] = _m;
                _ptr_array_neurongroup_n[_idx] = _n;
                _ptr_array_neurongroup_ge[_idx] = _ge;
                _ptr_array_neurongroup_v[_idx] = _v;
                _ptr_array_neurongroup_gi[_idx] = _gi;
                _ptr_array_neurongroup_not_refractory[_idx] = not_refractory;
            }
        }
    }
}
subteam2.c
/* test two omp for loops in two subteams and a single thread in the 3rd subteam */ #include <stdio.h> #include <stdlib.h> #if defined(_OPENMP) #include <omp.h> #endif /* _OPENMP */ /*by Liao, new data types and functions to support thread subteams*/ /*compiler generated new data type to store thread ids in a subteam*/ typedef struct{ int iCount; int *iThreadIds; } omp_id_set_t; omp_id_set_t idSet1,idSet2,idSet3; extern int __ompc_is_in_idset(); extern void __ompc_subteam_create(); void *subteam1, *subteam2, *subteam3; /*use it as &threadsubteam*/ #define NUMELEMENT 100 int main(void) { int a[NUMELEMENT]; int i,j=0,k,sum=0,sum2=0; /* assume 5 threads */ #ifdef _OPENMP omp_set_num_threads(5); #endif /* manual code to generate the thread subteams' ID sets currently */ /*stuff code to get ids from the thread ids in the subteam*/ idSet1.iCount=2; idSet1.iThreadIds=(int *)malloc(2*sizeof(int)); idSet1.iThreadIds[0]=1; idSet1.iThreadIds[1]=3; idSet2.iCount=2; idSet2.iThreadIds=(int *)malloc(2*sizeof(int)); idSet2.iThreadIds[0]=0; idSet2.iThreadIds[1]=2; idSet3.iCount=1; idSet3.iThreadIds=(int *)malloc(1*sizeof(int)); idSet3.iThreadIds[0]=1; #pragma omp parallel { /* onthreads(0,2) */ #pragma omp for reduction(+:sum) for (i=1;i<=NUMELEMENT;i++) { sum = sum +i; } /* onthreads(1,3) */ #pragma omp for schedule(dynamic,5) for (i=0;i<NUMELEMENT;i++) { a[i]=9; } /* onthread 4 */ #pragma omp single { #ifdef _OPENMP j=omp_get_thread_num(); #endif printf("I am the single one: %d\n",j ); } }/*end of parallel */ /*------verify results---------------*/ for (i=0;i<NUMELEMENT;i++) { sum2=sum2+a[i]; } printf("sum=%d\n",sum); printf("sum2=%d\n",sum2); return 0; }