source
stringlengths
3
92
c
stringlengths
26
2.25M
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % John Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/color-private.h" #include "magick/cache.h" #include "magick/draw.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/resample.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/transform.h" #include "magick/signature-private.h" #include "magick/utility.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/
/*
  Persistent state for EWA (Elliptical Weighted Average) resampling of one
  image.  Created by AcquireResampleFilter(), reconfigured per lookup point
  by ScaleResampleFilter(), consumed by ResamplePixelColor(), and released
  by DestroyResampleFilter().
*/
struct _ResampleFilter
{
  CacheView
    *view;                /* cache view onto 'image'; owned by this struct */

  Image
    *image;               /* reference-counted image being resampled */

  ExceptionInfo
    *exception;           /* caller's exception sink (not owned) */

  MagickBooleanType
    debug;                /* tracing enabled at acquire time */

  /* Information about image being resampled */
  ssize_t
    image_area;           /* columns*rows, used for scaling limit checks */

  InterpolatePixelMethod
    interpolate;          /* fallback point-interpolation method */

  VirtualPixelMethod
    virtual_pixel;        /* how pixels outside the image are generated */

  FilterTypes
    filter;               /* resize filter selected via SetResampleFilter() */

  /* processing settings needed */
  MagickBooleanType
    limit_reached,        /* ellipse too large -- use averaged result */
    do_interpolate,       /* point filter -- bypass EWA, interpolate only */
    average_defined;      /* average_pixel has been computed and cached */

  MagickPixelPacket
    average_pixel;        /* lazily computed whole-image average color */

  /* current elliptical area being resampled around center point */
  double
    A, B, C,              /* ellipse coefficients: A*u^2 + B*u*v + C*v^2 = F */
    Vlimit, Ulimit,       /* half-extents of the ellipse bounding box */
    Uwidth,               /* half-width of a scan line across the ellipse */
    slope;                /* du/dv slope of the scan-line start position */

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;                    /* ellipse quotient bound (support squared) */
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;            /* structure validity marker (MagickSignature) */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResampleFilter() initializes the information resample needs do to a
%  scaled lookup of a color from an image, using area sampling.
%
%  The algorithm is based on a Elliptical Weighted Average, where the pixels
%  found in a large elliptical area is averaged together according to a
%  weighting (filter) function.  For more details see "Fundamentals of
%  Texture Mapping and Image Warping" a master's thesis by Paul S. Heckbert,
%  June 17, 1989.  Available for free from, http://www.cs.cmu.edu/~ph/
%
%  As EWA resampling (or any sort of resampling) can require a lot of
%  calculations to produce a distorted scaling of the source image for each
%  output pixel, the ResampleFilter structure generated holds that information
%  between individual image resampling.
%
%  This function will make the appropriate AcquireCacheView() calls
%  to view the image, calling functions do not need to open a cache view.
%
%  Usage Example...
%      resample_filter=AcquireResampleFilter(image,exception);
%      SetResampleFilter(resample_filter, GaussianFilter, 1.0);
%      for (y=0; y < (ssize_t) image->rows; y++) {
%        for (x=0; x < (ssize_t) image->columns; x++) {
%          u= ....;   v= ....;
%          ScaleResampleFilter(resample_filter, ... scaling vectors ...);
%          (void) ResamplePixelColor(resample_filter,u,v,&pixel);
%          ... assign resampled pixel value ...
%        }
%      }
%      DestroyResampleFilter(resample_filter);
%
%  The format of the AcquireResampleFilter method is:
%
%     ResampleFilter *AcquireResampleFilter(const Image *image,
%       ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);

  /* allocation failure is fatal by policy -- callers never see NULL */
  resample_filter=(ResampleFilter *) AcquireMagickMemory(
    sizeof(*resample_filter));
  if (resample_filter == (ResampleFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(resample_filter,0,sizeof(*resample_filter));

  /* take our own image reference and cache view; released in Destroy */
  resample_filter->exception=exception;
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireCacheView(resample_filter->image);

  resample_filter->debug=IsEventLogging();
  resample_filter->signature=MagickSignature;

  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined = MagickFalse;

  /* initialise the resampling filter settings from the image defaults */
  SetResampleFilter(resample_filter, image->filter, image->blur);
  (void) SetResampleFilterInterpolateMethod(resample_filter,
    image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));

  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResampleFilter() finalizes and cleans up the resampling
%  resample_filter as returned by AcquireResampleFilter(), freeing any memory
%  or other information as needed.
%
%  The format of the DestroyResampleFilter method is:
%
%      ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);

  /* release view before image: the view references the image */
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* invalidate signature so stale pointers trip the asserts above */
  resample_filter->signature=(~MagickSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e P i x e l C o l o r                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResamplePixelColor() samples the pixel values surrounding the location
%  given using an elliptical weighted average, at the scale previously
%  calculated, and in the most efficient manner possible for the
%  VirtualPixelMethod setting.
%
%  The format of the ResamplePixelColor method is:
%
%     MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
%       const double u0,const double v0,MagickPixelPacket *pixel)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o u0,v0: A double representing the center of the area to resample,
%        the distortion-transformed x,y coordinate.
%
%    o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  MagickPixelPacket *pixel)
{
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const PixelPacket *pixels;
  register const IndexPacket *indexes;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  status=MagickTrue;
  /* GetMagickPixelPacket(resample_filter->image,pixel); */

  /* Point filter selected: skip EWA entirely and do a direct interpolated
     lookup at (u0,v0). */
  if ( resample_filter->do_interpolate ) {
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does the resample area Miss the image entirely?
    If so -- and the virtual pixel method makes the miss a simple solid
    color -- return that color directly via a single interpolated lookup.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    /* Methods that produce one constant color everywhere off-canvas: a miss
       (or a reached scaling limit) means the whole ellipse is that color. */
    case BackgroundVirtualPixelMethod:
    case ConstantVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows
           )
        hit++;
      break;

    /* Edge-extend methods: only a miss beyond a CORNER (off both axes at
       once) is a guaranteed solid color. */
    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      if (    ( u0 + resample_filter->Ulimit < 0.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if (    v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if (    u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    /* Dither extends the image by 32 pixels of randomized edge pixels; the
       solid-color region starts beyond that fringe. */
    case DitherVirtualPixelMethod:
      if (    ( u0 + resample_filter->Ulimit < -32.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* whole area is a solid color -- just return that color */
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

  /*
    Scaling limits reached (ellipse too large to sample practically):
    return an 'averaged' result appropriate to the virtual pixel method.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /* This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood works well in general,
           but falls down for TileEdge methods.
           FUTURE: this needs to be done properly. */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel (lookup at -1,-1 is always
           off-canvas) - Is there a more direct way? */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate an average color of the WHOLE image (computed once,
           then cached in resample_filter->average_pixel) */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image by resizing
             it down to a single pixel with a box filter. */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireCacheView(average_image);
          pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const PixelPacket *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
          SetMagickPixelPacket(resample_filter->image,pixels,indexes,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is average of image average half background:
                 an alpha-weighted blend of the two colors.
                 FUTURE: replace with a 50% blend of both pixels */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                  resample_filter->average_pixel.opacity));
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              weight = QuantumScale*((MagickRealType)(QuantumRange-
                  resample_filter->image->background_color.opacity));
              resample_filter->average_pixel.red +=
                  weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                  weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                  weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.opacity +=
                  resample_filter->image->background_color.opacity;
              divisor_c += weight;

              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.opacity /= 2;
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection.  Color channels accumulate
    alpha-weighted sums (divisor_c); opacity accumulates with the raw filter
    weight only (divisor_m).
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
  if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0.
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels within the scaled ellipse, bound by
    a parallelogram fitted to the ellipse.  The ellipse quotient Q is updated
    incrementally along each scan line using finite differences (DQ, DDQ)
    instead of re-evaluating the quadratic per pixel.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);         /* first pixel in scanline */
    u1 += resample_filter->slope;  /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const PixelPacket *) NULL)
      return(MagickFalse);
    indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
             sqrt(Q));    /* a SquareRoot!  Arrggghhhhh... */
#endif
        pixel->opacity  += weight*pixels->opacity;
        divisor_m += weight;

        if (pixel->matte != MagickFalse)
          weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
        pixel->red   += weight*pixels->red;
        pixel->green += weight*pixels->green;
        pixel->blue  += weight*pixels->blue;
        if (pixel->colorspace == CMYKColorspace)
          pixel->index += weight*(*indexes);
        divisor_c += weight;

        hit++;
#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse
           NOTE: braces below are intentionally unbalanced per preprocessor
           branch -- the closing brace of the 'if' above lives inside the
           debug 'else' here, or in the #else clause when debug is off. */
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
            (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels++;
      indexes++;
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 ) {
    /* not enough pixels in resampling, resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finalize results of resampling: normalize by the accumulated weights
    and clamp each channel into quantum range.
  */
  divisor_m = 1.0/divisor_m;
  pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
  divisor_c = 1.0/divisor_c;
  pixel->red   = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue  = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
  return(MagickTrue);
}

#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
-   C l a m p U p A x e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampUpAxes() function converts the input vectors into a major and
%  minor axis unit vectors, and their magnitude.  This allows us to
%  ensure that the ellipse generated is never smaller than the unit
%  circle and thus never too small for use in EWA resampling.
%
%  This purely mathematical 'magic' was provided by Professor Nicolas
%  Robidoux and his Masters student Chantal Racette.
%
%  Reference: "We Recommend Singular Value Decomposition", David Austin
%    http://www.ams.org/samplings/feature-column/fcarc-svd
%
%  By generating major and minor axis vectors, we can actually use the
%  ellipse in its "canonical form", by remapping the dx,dy of the
%  sampled point into distances along the major and minor axis unit
%  vectors.
%
%  Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
			       const double dvx,
			       const double duy,
			       const double dvy,
			       double *major_mag,
			       double *minor_mag,
			       double *major_unit_x,
			       double *major_unit_y,
			       double *minor_unit_x,
			       double *minor_unit_y)
{
  /*
   * ClampUpAxes takes an input 2x2 matrix
   *
   *   [ a b ] = [ dux duy ]
   *   [ c d ]   [ dvx dvy ]
   *
   * and computes from it the major and minor axis vectors
   * [major_x,major_y] and [minor_x,minor_y] of the smallest ellipse
   * containing both the unit disk and the ellipse which is the image of
   * the unit disk by the linear transformation
   *
   *   [ dux duy ] [S] = [s]
   *   [ dvx dvy ] [T]   [t]
   *
   * Output:
   *
   *   major_mag / minor_mag: half-lengths of the axes of the "new"
   *     (clamped-up) ellipse; each is at least 1.
   *
   *   (major_unit_x, major_unit_y) / (minor_unit_x, minor_unit_y): unit
   *     direction vectors of the major and minor axes.  The minor axis
   *     vector is always the major axis vector rotated 90 degrees
   *     counterclockwise.
   *
   * Method (essence): compute the product of the first two factors of an
   * SVD of the inverse Jacobian Jinv = U Sigma V^T and clamp the singular
   * values (the diagonal of Sigma) up to 1.  Because V^T is orthogonal it
   * maps the unit circle to itself, so the rightmost SVD factor never
   * needs to be computed; the columns of U scaled by the clamped singular
   * values are the ellipse axes.  The clamping guarantees that the
   * pullback of a disk of radius r in output space always contains a disc
   * of radius r in input space, so the EWA ellipse is never too small to
   * capture source pixels (idea from Andreas Gustafsson's Masters thesis
   * "Interactive Image Warping", 1993, Section 3.6; SVD clamping approach
   * from Craig DeForest's PDL::Transform).
   *
   * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette of
   * Laurentian University with insightful suggestions from Anthony
   * Thyssen and funding from the National Science and Engineering
   * Research Council of Canada.  It is distinguished from its
   * predecessors by its efficient handling of degenerate cases.
   */
  const double a = dux;
  const double b = duy;
  const double c = dvx;
  const double d = dvy;
  /*
   * n is the matrix Jinv * transpose(Jinv).  Eigenvalues of n are the
   * squares of the singular values of Jinv; eigenvectors of n are the
   * left singular vectors of Jinv.
   */
  const double aa = a*a;
  const double bb = b*b;
  const double cc = c*c;
  const double dd = d*d;
  const double n11 = aa+bb;
  const double n12 = a*c+b*d;
  const double n21 = n12;
  const double n22 = cc+dd;
  const double det = a*d-b*c;
  const double twice_det = det+det;
  const double frobenius_squared = n11+n22;
  /* discriminant of n's characteristic polynomial, written in a factored
     form that is nonnegative in exact arithmetic */
  const double discriminant =
    (frobenius_squared+twice_det)*(frobenius_squared-twice_det);
  const double sqrt_discriminant = sqrt(discriminant);
  /*
   * s1s1/s2s2 are the squares of the largest/smallest singular values of
   * Jinv (their reciprocals are the smallest/largest singular values of
   * the Jacobian itself).  Only the squares are needed until the final
   * clamped magnitudes are produced.
   */
  const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
  const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
  const double s1s1minusn11 = s1s1-n11;
  const double s1s1minusn22 = s1s1-n22;
  /*
   * u1 = (u11,u21), the first column of U, is a (non-normalized) left
   * singular vector corresponding to s1, computed as an eigenvector of n
   * for the eigenvalue s1^2.
   */
  const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
  const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
  /*
   * Select the largest row of n - s1^2 I to find the eigenvector.  If
   * both s1^2-n11 and s1^2-n22 are zero, n - s1^2 I is the zero matrix:
   * any vector is an eigenvector, norm below is zero, and (in exact
   * arithmetic) this is the only case in which norm = 0.  Setting u1 to
   * the simple but arbitrary vector [1,0] when norm = 0 therefore safely
   * covers all degenerate cases.
   */
  const double temp_u11 =
    ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
  const double temp_u21 =
    ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
  const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
  /* normalized first left singular vector (largest singular value) */
  const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
  const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
  /*
   * Clamp the singular values up to 1.
   */
  *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
  *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
  /*
   * Return the unit major and minor axis direction vectors (minor is
   * major rotated 90 degrees counterclockwise).
   */
  *major_unit_x = u11;
  *major_unit_y = u21;
  *minor_unit_x = -u21;
  *minor_unit_y = u11;
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e R e s a m p l e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleResampleFilter() does all the calculations needed to resample an image
%  at a specific scale, defined by two scaling vectors.  This is not using
%  an orthogonal scaling, but two distorted scaling vectors, to allow the
%  generation of an angled ellipse.
%
%  As only two derivative scaling vectors are used the center of the ellipse
%  must be the center of the lookup.  That is any curvature that the
%  distortion may produce is discounted.
%
%  The input vectors are produced by either finding the derivatives of the
%  distortion function, or the partial derivatives from a distortion mapping.
%  They do not need to be the orthogonal dx,dy scaling vectors, but can be
%  calculated from other derivatives.  For example you could use dr,da/r
%  polar coordinate vector scaling vectors.
%
%  If   u,v = DistortEquation(x,y)   OR   u = Fu(x,y); v = Fv(x,y)
%  then the scaling vectors are determined from the derivatives...
%      du/dx, dv/dx     and    du/dy, dv/dy
%  If the resulting scaling vectors are orthogonally aligned then...
%      dv/dx = 0   and   du/dy = 0
%  producing an orthogonally aligned ellipse in source space for the area
%  to be resampled.
%
%  Note that scaling vectors are different to argument order.  Argument
%  order is the general order the derivatives are extracted from the
%  distortion equations, and not the scaling vectors.  As such the middle
%  two values may be swapped from what you expect.  Caution is advised.
%
%  WARNING: It is assumed that any SetResampleFilter() method call will
%  always be performed before the ScaleResampleFilter() method, so that the
%  size of the ellipse will match the support for the resampling filter being
%  used.
%
%  The format of the ScaleResampleFilter method is:
%
%      void ScaleResampleFilter(const ResampleFilter *resample_filter,
%        const double dux,const double duy,const double dvx,const double dvy)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resampling information defining the
%      image being resampled
%
%    o dux,duy,dvx,dvy:
%         The derivatives or scaling vectors defining the EWA ellipse.
%         NOTE: watch the order, which is based on the order derivatives
%         are usually determined from distortion equations (see above).
%         The middle two values may need to be swapped if you are thinking
%         in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
       dux, dvx, duy, dvy);
#endif

  /* Find Ellipse Coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
        du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients
     However when magnifying images, the scaling vectors will be small
     resulting in a ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
#if EWA_CLAMP
  { double major_mag, minor_mag, major_x, major_y, minor_x, minor_y;

    /* ClampUpAxes returns unit axis directions plus magnitudes clamped
       up to 1.0; rescale the unit vectors back to full axis vectors. */
    ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
                &major_x, &major_y, &minor_x, &minor_y);
    major_x *= major_mag;  major_y *= major_mag;
    minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
    (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
         major_x, major_y, minor_x, minor_y);
#endif
    A = major_y*major_y+minor_y*minor_y;
    B = -2.0*(major_x*major_y+minor_x*minor_y);
    C = major_x*major_x+minor_x*minor_x;
    F = major_mag*minor_mag;
    F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */

#else /* HQ_EWA */
  /*
    This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
    thesis, which adds a unit circle to the elliptical area so as to do
    both Reconstruction and Prefiltering of the pixels in the resampling.
    It also means it is always likely to have at least 4 pixels within the
    area of the ellipse, for weighted averaging.  No scaling will result
    with F == 4.0 and a circle of radius 2.0, and F smaller than this
    means magnification is being used.

    NOTE: This method produces a very blurry result at near unity scale
    while producing perfect results for strong minification and
    magnifications.

    However filter support is fixed to 2.0 (no good for Windowed Sinc
    filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information currently not needed at this time, but may be
     needed later for better limit determination.

     It is also good to have as a record for future debugging
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;

    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major = MagickHuge;
    else
      Major = sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
         RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /* If one or both of the scaling vectors is impossibly large (producing
     a very large raw F value), we may as well not bother doing any form
     of resampling since resampled area is very large. In this case some
     alternative means of pixel sampling, such as the average of the
     whole image is needed to get a reasonable result.
     Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickHuge ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse to match the filters support
     (that is, multiply F by the square of the support).
  */
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
           resample_filter->Ulimit, resample_filter->Vlimit,
           resample_filter->Uwidth, resample_filter->slope );
#endif

  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
         > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilter() set the resampling filter lookup table based on a
%  specific filter.  Note that the filter is used as a radial filter not as a
%  two pass orthogonally aligned resampling filter.
%
%  The default Filter, is Gaussian, which is the standard filter used by the
%  original paper on the Elliptical Weighted Average Algorithm. However other
%  filters can also be used.
%
%  The format of the SetResampleFilter method is:
%
%      void SetResampleFilter(ResampleFilter *resample_filter,
%        const FilterTypes filter,const double blur)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
%    o filter: the resize filter for elliptical weighting LUT
%
%    o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* A Point filter means no EWA: fall back to simple interpolation */
  if ( filter == PointFilter )
    {
      resample_filter->do_interpolate = MagickTrue;
      return; /* EWA turned off - nothing more to do */
    }

  /* Set a default cylindrical filter of a 'low blur' Jinc windowed Jinc */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  resize_filter = AcquireResizeFilter(resample_filter->image,
       resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL)
    {
      (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
           ModuleError, "UnableToSetFilteringValue",
           "Fall back to default EWA gaussian filter");
      resample_filter->filter = PointFilter;
    }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
  */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  {
    register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range.
       The LUT is indexed by squared radius, hence the sqrt() here. */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
           GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle
    This assumes that any real scaling changes will always take
    place AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /* This is old code kept as a reference only. It is very wrong, and I don't
     understand exactly what it was attempting to do.
  */
  /*
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA gaussian lookup would use   exp(Q*ALPHA)
    where  Q = distance squared from 0.0 (center) to 1.0 (edge)
    and    ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
    The table is of length 1024, and equates to support radius of 2.0
    thus needs to be scaled by  ALPHA*4/1024 and any blur factor squared

    The above came from some reference code provided by Fred Weinhaus and
    seems to have been a guess that was appropriate for its use in a 3d
    perspective landscape mapping program.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
  break;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,"resample:verbose")) )
      {
        /* Debug output of the filter weighting LUT
          Gnuplot the LUT with horizontal adjusted to 'r' using...
             plot [0:2][-.2:1] "lut.dat" using (sqrt($0/1024)*2):1 with lines
          The filter values is normalized for comparison
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values)\n", WLUT_WIDTH);
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# And the whole table represents the filters support.\n");
        printf("\n"); /* generates a 'break' in gnuplot if multiple outputs */
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
              GetMagickPrecision(),sqrt((double)Q)*r_scale,
              GetMagickPrecision(),resample_filter->filter_lut[Q] );
      }
    /* output the above once only for each image, and each setting */
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterInterpolateMethod() sets the resample filter
%  interpolation method.
%
%  The format of the SetResampleFilterInterpolateMethod method is:
%
%      MagickBooleanType SetResampleFilterInterpolateMethod(
%        ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->interpolate=method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
%  associated with the specified resample filter.
%
%  The format of the SetResampleFilterVirtualPixelMethod method is:
%
%      MagickBooleanType SetResampleFilterVirtualPixelMethod(
%        ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->virtual_pixel=method;
  /* Only propagate a concrete method to the underlying cache view;
     UndefinedVirtualPixelMethod leaves the view's current setting alone. */
  if (method != UndefinedVirtualPixelMethod)
    (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
schedule-clause.c
/*
 Demonstration of the OpenMP schedule(static,chunk) clause.

 Example runs (output kept verbatim; the program prints in Spanish):

 $ ./bin/schedule-clause 2
  thread 1 suma a[2] suma=2
  thread 1 suma a[3] suma=5
  thread 2 suma a[4] suma=4
  thread 2 suma a[5] suma=9
  thread 0 suma a[0] suma=0
  thread 0 suma a[1] suma=1
  thread 0 suma a[6] suma=7
 Fuera de 'parallel for' suma=7

 Iterations 0,1 are done by thread 0; iterations 2,3 by thread 1;
 iterations 4,5 by thread 2; iteration 6 wraps around to thread 0.

 $ ./bin/schedule-clause 4
  thread 1 suma a[4] suma=4
  thread 1 suma a[5] suma=9
  thread 1 suma a[6] suma=15
  thread 0 suma a[0] suma=0
  thread 0 suma a[1] suma=1
  thread 0 suma a[2] suma=3
  thread 0 suma a[3] suma=6
 Fuera de 'parallel for' suma=15

 Iterations 0,1,2,3 are done by thread 0; iterations 4,5,6 by thread 1.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0   /* serial fallback when OpenMP is disabled */
#endif

/* Sums a[0..n-1] with a parallel for using schedule(static,chunk):
 * iterations are dealt to threads round-robin in blocks of 'chunk'
 * (argv[1]).  'suma' is firstprivate (each thread starts from 0) and
 * lastprivate (the value from the thread that executes the final
 * iteration is copied out), so the value printed afterwards is only
 * that thread's partial sum — which is the point of the demo. */
int main(int argc, char **argv)   /* BUG FIX: implicit 'int' return type is invalid since C99 */
{
  int i, n = 16, chunk, a[n], suma = 0;

  if (argc < 2) {
    fprintf(stderr,"\nFalta chunk \n");   /* runtime message kept verbatim */
    exit(-1);
  }
  /* chunk = number of consecutive iterations handed to each thread in
   * turn; e.g. chunk=2 with 4 threads: thread 0 -> i=0,1; thread 1 ->
   * i=2,3; thread 2 -> i=4,5; thread 3 -> i=6,7; then wrap around. */
  chunk = atoi(argv[1]);

  for (i = 0; i < n; i++)
    a[i] = i;

  #pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(static,chunk)
  for (i = 0; i < n; i++) {
    suma = suma + a[i];
    printf(" thread %d suma a[%d] suma=%d \n", omp_get_thread_num(), i, suma);
  }
  printf("Fuera de 'parallel for' suma=%d\n", suma);
  return 0;   /* BUG FIX: control fell off the end of main without a return */
}
teams.c
#include <stdlib.h>
#include <assert.h>
#include <omp.h>

/* Sums the team numbers 0..(num_teams-1) with a '+' reduction across a
 * league of at most n teams, then checks the result against the
 * closed-form sum of the first n-1 natural numbers. */
int main()
{
  int res = 0, n = 10;

  /* Each team contributes its own team number to the reduction; team 0
   * also records how many teams the runtime actually created, since
   * num_teams(n) is only an upper bound. */
  #pragma omp teams num_teams(n) reduction(+:res)
  {
    res = omp_get_team_num();
    if (omp_get_team_num() == 0)
      n = omp_get_num_teams();
  }

  /* BUG FIX: was 'Assert' — an undeclared identifier that fails to
   * link; the standard macro from <assert.h> is lowercase 'assert'. */
  assert (res == (n*(n-1))/2); // Sum of first n-1 natural numbers
  return 0;
}
PosTransformer.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
/** @file VectorOperators.h
 * @brief Support functions to handle position type data managed by soa
 */
#ifndef QMCPLUSPLUS_SOA_FAST_PARTICLE_OPERATORS_H
#define QMCPLUSPLUS_SOA_FAST_PARTICLE_OPERATORS_H

#include <simd/blas1.hpp>

namespace qmcplusplus
{
//Need to reorg
#if 0
/** Dummy template class to be specialized
 *
 * - T1 the datatype to be transformed
 * - D dimension
 * - ORTHO true, if only Diagonal Elements are used
 */
template<class T1, unsigned D, bool ORTHO>
struct PosTransformer { };

/** Specialized PosTransformer<T,3,true> using only the diagonal elements
 */
template<class T>
struct PosTransformer<T,3,true>
{
  using Array_t=VectorSoaContainer<T,3>;
  using Transformer_t=Tensor<T,3>;
  //index for the tensor
  enum {iXX=0, iXY=1, iXZ=2, iYX=3, iYY=4, iYZ=5, iZX=6, iZY=7, iZZ=8};

  // pout[c] += X[cc]*pin[c] per component, diagonal-only transform
  inline static void apply(const Array_t& pin, const Transformer_t& X, Array_t& pout,
                           int first, int last)
  {
    const int n=last-first;
    blas::axpy(X[iXX],pin.data(0),pout.data(0),n);
    blas::axpy(X[iYY],pin.data(1),pout.data(1),n);
    blas::axpy(X[iZZ],pin.data(2),pout.data(2),n);
  }

  inline static void apply(const Transformer_t& X, const Array_t& pin, Array_t& pout,
                           int first, int last)
  {
    ::apply(pin,X,pout,first,last);
  }

  // in-place diagonal scaling of each component
  inline static void apply(Array_t& pinout, const Transformer_t& X,int first, int last)
  {
    const int n=last-first;
    blas::scal(X[iXX],pinout.data(0),n);
    blas::scal(X[iYY],pinout.data(1),n);
    blas::scal(X[iZZ],pinout.data(2),n);
  }

  inline static void apply(const Transformer_t& X, Array_t& pinout, int first, int last)
  {
    ::apply(pinout,X,first,last);
  }
};

template<class T>
struct PosTransformer<T,3,false>
{
  using Array_t=VectorSoaContainer<T,3>;
  using Transformer_t=Tensor<T,3>;

  // full 3x3 row-vector transform: out = in * X
  inline static void apply(const Array_t& pin, const Transformer_t& X, Array_t& pout,
                           int first, int last)
  {
    const int n=last-first;
    register T x00=X[0],x01=X[1],x02=X[2],
               x10=X[3],x11=X[4],x12=X[5],
               x20=X[6],x21=X[7],x22=X[8];
    const T* restrict x_in=pin.data(0)+first;
    ASSUME_ALIGNED(x_in);
    const T* restrict y_in=pin.data(1)+first;
    ASSUME_ALIGNED(y_in);
    const T* restrict z_in=pin.data(2)+first;
    ASSUME_ALIGNED(z_in);
    T* restrict x_out=pout.data(0)+first;
    ASSUME_ALIGNED(x_out);
    T* restrict y_out=pout.data(1)+first;
    ASSUME_ALIGNED(y_out);
    T* restrict z_out=pout.data(2)+first;
    ASSUME_ALIGNED(z_out);
#pragma ivdep
    for(int i=0; i<n; i++)
    {
      x_out[i]=x_in[i]*x00+y_in[i]*x10+z_in[i]*x20;
      y_out[i]=x_in[i]*x01+y_in[i]*x11+z_in[i]*x21;
      z_out[i]=x_in[i]*x02+y_in[i]*x12+z_in[i]*x22;
    }
  }

  inline static void apply(const Transformer_t& X, const Array_t& pin, Array_t& pout,
                           int first, int last)
  {
    ::apply(pin,X,pout,first,last);
  }

  // in-place version: uses temporaries so all three reads happen
  // before the writes for each element
  inline static void apply(Array_t& pinout, const Transformer_t& X,int first, int last)
  {
    const int n=last-first;
    register T x00=X[0],x01=X[1],x02=X[2],
               x10=X[3],x11=X[4],x12=X[5],
               x20=X[6],x21=X[7],x22=X[8];
    T* restrict x_inout=pinout.data(0)+first;
    ASSUME_ALIGNED(x_inout);
    T* restrict y_inout=pinout.data(1)+first;
    ASSUME_ALIGNED(y_inout);
    T* restrict z_inout=pinout.data(2)+first;
    ASSUME_ALIGNED(z_inout);
#pragma ivdep
    for(int i=0; i<n; i++)
    {
      T x=x_inout[i]*x00+y_inout[i]*x10+z_inout[i]*x20;
      T y=x_inout[i]*x01+y_inout[i]*x11+z_inout[i]*x21;
      T z=x_inout[i]*x02+y_inout[i]*x12+z_inout[i]*x22;
      x_inout[i]=x;
      y_inout[i]=y;
      z_inout[i]=z;
    }
  }

  inline static void apply(const Transformer_t& X, Array_t& pinout, int first, int last)
  {
    ::apply(X,pinout,first,last);
  }
};
#endif

/** General conversion function from AoS[nrows][ncols] to SoA[ncols][ldb]
 * @param nrows the first dimension
 * @param ncols the second dimension
 * @param iptr input pointer
 * @param lda stride of iptr
 * @param out output pointer
 * @param ldb stride of out
 *
 * Modeled after blas/lapack for lda/ldb
 */
// NOTE(review): 'lda' is never used below; the input rows are indexed
// with 'ncols' as the stride — confirm whether lda was intended.
template<typename T1, typename T2>
void PosAoS2SoA(int nrows, int ncols, const T1* restrict iptr, int lda,
                T2* restrict out, int ldb)
{
  T2* restrict x=out      ;
  T2* restrict y=out+  ldb;
  T2* restrict z=out+2*ldb;
#pragma omp simd aligned(x,y,z)
  for(int i=0; i<nrows; ++i)
  {
    x[i]=iptr[i*ncols  ]; //x[i]=in[i][0];
    y[i]=iptr[i*ncols+1]; //y[i]=in[i][1];
    z[i]=iptr[i*ncols+2]; //z[i]=in[i][2];
  }
}

/** General conversion function from SoA[ncols][ldb] to AoS[nrows][ncols]
 * @param nrows the first dimension
 * @param ncols the second dimension
 * @param iptr input pointer
 * @param lda stride of iptr
 * @param out output pointer
 * @param ldb stride of out
 *
 * Modeled after blas/lapack for lda/ldb
 */
template<typename T1, typename T2>
void PosSoA2AoS(int nrows, int ncols, const T1* restrict iptr, int lda,
                T2* restrict out, int ldb)
{
  const T1* restrict x=iptr      ;
  const T1* restrict y=iptr+  lda;
  const T1* restrict z=iptr+2*lda;
#pragma omp simd aligned(x,y,z)
  for(int i=0; i<nrows; ++i)
  {
    out[i*ldb  ]=x[i]; //out[i][0]=x[i];
    out[i*ldb+1]=y[i]; //out[i][1]=y[i];
    out[i*ldb+2]=z[i]; //out[i][2]=z[i];
  }
}

#if 0
//#if defined(HAVE_MKL)
///specialization for double AoS2SoA
template<>
void PosAoS2SoA(int nrows, int ncols, const double* restrict in, int lda,
                double* restrict out, int ldb)
{
  const double zone={1.0};
  mkl_domatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
///specialization for float AoS2SoA
template<>
void PosAoS2SoA(int nrows, int ncols, const float* restrict in, int lda,
                float* restrict out, int ldb)
{
  const float zone={1.0f};
  mkl_somatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
///specialization for double SoA2AoS
template<>
void PosSoA2AoS(int nrows, int ncols, const double* restrict in, int lda,
                double* restrict out, int ldb)
{
  const double zone={1.0};
  mkl_domatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
///specialization for float SoA2AoS
template<>
void PosSoA2AoS(int nrows, int ncols, const float* restrict in, int lda,
                float* restrict out, int ldb)
{
  const float zone={1.0f};
  mkl_somatcopy('R','T',nrows,ncols,zone,in,lda,out,ldb);
}
#endif
}
#endif
GB_unop__exp_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__exp_fp32_fp32
// op(A') function:  GB_unop_tran__exp_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = expf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = expf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = expf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__exp_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = expf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = expf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__exp_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared via textual inclusion; it uses the
    // GB_CAST_OP macro defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
common_functions.h
/* * Evan Lezar * 18 November 2010 * * Post processing of the eigenvalues calcuated using the ARPACK-based solvers routines * */ void apply_shift ( int N, float* S, float* T, int LDMAT, float shift ) { // T = T - shift*S checkpoint t0 = tic(); int row, col; #pragma omp parallel default(shared) private(row, col) { #pragma omp for schedule(runtime) for ( col = 0; col < N; ++col ) for ( row = 0; row < N; ++row ) T[col*LDMAT + row] = T[col*LDMAT + row] - shift*S[col*LDMAT + row]; } } struct float_complex { float x, y; }; typedef struct float_complex float_complex; inline float_complex make_float_complex ( float x, float y ) { float_complex c = {x, y}; return c; } float_complex invert ( float_complex v ) { float_complex l; l.x = v.x/(v.x*v.x + v.y*v.y); l.y = -v.y/(v.x*v.x + v.y*v.y); return l; } // return v / ( 1.0 + v*shift ); float_complex unshift ( float_complex v, float shift ) { float_complex l = v; // calculate ( 1.0 + v*shift ) l.x = l.x*shift + 1.0; l.y *= shift; float_complex result; // calculate v/l result.x = ( v.x*l.x + v.y*l.y ) / ( l.x*l.x + l.y*l.y ); result.y = ( v.y*l.x - v.x*l.y ) / ( l.x*l.x + l.y*l.y ); return result; } float eigenvalue_magnitude ( const float real, const float imag ) { return sqrt ( real*real + imag*imag ); } int insert_smallest_into_list ( const float* value_list, int* index_list, const int N, const float value, const int index ) { if ( isnan(value) | isinf(value) ) { return -1; } if ( index == 0 ) { // first element to insert index_list[0] = 0; return 0; } int i, j, insert_at = -1; int limit = index; if ( limit > (N-1) ) limit = (N-1); for ( i=0; i < limit; ++i ) { if ( value_list[index_list[i]] > value ) { insert_at = i; int last = index_list[limit]; for ( j = limit; j > i; j-- ) { index_list[j] = index_list[j-1]; } if ( (limit+1) < N ) index_list[limit+1] = last; break; } } if ( insert_at < 0 ) { insert_at = limit; } index_list[insert_at] = index; return insert_at; } int a_is_larger_than_b ( float a, float b ) { if ( isnan(a) | 
isinf(a) ) return 1; if ( isnan(b) | isinf(b) ) return 0; if ( a > b ) return 1; else return 0; } int insert_largest_into_list ( const float* value_list, int* index_list, const int N, const float value, const int index ) { if ( index == 0 ) { // first element to insert index_list[0] = 0; return 0; } int i, j, insert_at = -1; int limit = index; if ( limit > (N-1) ) limit = (N-1); for ( i=0; i < limit; ++i ) { if ( a_is_larger_than_b(value, value_list[index_list[i]]) ) { insert_at = i; int last = index_list[limit]; for ( j = limit; j > i; j-- ) { index_list[j] = index_list[j-1]; } if ( (limit+1) < N ) index_list[limit+1] = last; break; } } if ( insert_at < 0 ) { insert_at = limit; } index_list[insert_at] = index; return insert_at; } void get_smallest_magnitude_eigenvalues ( int N, int NEV, int num_calculated, float shift, float_complex* eigenvalues, float* eigenvectors, float* residuals, float_complex* temp_eigenvalues, const float* temp_eigenvectors, const float* temp_residuals ) { printf("pointers: %p %p %p %p %p %p\n", eigenvalues, eigenvectors, residuals, temp_eigenvalues, temp_eigenvectors, temp_residuals ); int i; int index; int smallest_index[num_calculated]; float magnitude[num_calculated]; float_complex v; float_complex l; for ( i = 0; i < num_calculated; ++i ) { v = temp_eigenvalues[i]; l = unshift(v, shift); printf("%d: %f +i%f : %f ::: %f +i%f\n", i, v.x, v.y, temp_residuals[i], l.x, l.y ); magnitude[i] = eigenvalue_magnitude ( l.x, l.y ); insert_smallest_into_list( magnitude, smallest_index, num_calculated, magnitude[i], i ); } int j = 0; i = 0; while ( i < NEV && j < num_calculated ) { index = smallest_index[j]; if ( 0 <= index && index < num_calculated ) { v = temp_eigenvalues[index]; l = unshift(v, shift); if ( v.x < 0 && l.x > 0 && temp_residuals[index] < 0.5 ) { eigenvalues[i] = l; residuals[i] = temp_residuals[index]; memcpy ( eigenvectors + N*i, temp_eigenvectors + N*index, N*sizeof(float) ); printf("using: %d : %f+j%f : %e\n", index, v.x, v.y, 
residuals[i] ); i++; } } j++; } } void calculate_eigen_values ( int N, void* DATA, int NEV, float shift, float* eigenvalues, float* eigenvectors, char* which ) { int i; int use_N_ev = 2*NEV; if ( use_N_ev > ( N/2 - 1 ) ) use_N_ev = N/2 - 1; // select the number of Arnoldi vectors to generate int NCV = 2*use_N_ev + 1; if ( NCV > N ) NCV = N; // allocate temporary storage for the vectors float* temp_ev = (float*)malloc ( NCV*2*sizeof(float) ); float* temp_vectors = (float*) malloc (N*NCV*sizeof(float)); float* temp_residuals = (float*)malloc ( (NCV )*sizeof(float)); float* residuals = (float*)malloc ( (use_N_ev)*sizeof(float)); // solve the eigenvalue problem using ARPACK arpack_ssev(N, (void*)DATA, use_N_ev, NCV, temp_ev, temp_vectors, temp_residuals, which ); // Copy the resultant eigenvalues memcpy(eigenvalues, temp_ev, NEV*2*sizeof(float)); memcpy(eigenvectors, temp_vectors, NEV*N*sizeof(float)); for (i=0; i < NEV; ++i) { printf("%d: %f\n", i, temp_residuals[i] ); } // free the temporary storage free ( temp_ev ); printf("1:\n"); free ( temp_vectors ); printf("2:\n"); free ( temp_residuals ); printf("3:\n"); free ( residuals ); printf("4:\n"); } void calculate_desired_eigenvalues ( int N, void* DATA, int NEV, float shift, float* eigenvalues, float* eigenvectors ) { // solve the eigenvalue problem using ARPACK int use_N_ev = 2*NEV; if ( use_N_ev > ( N/2 - 1 ) ) use_N_ev = N/2 - 1; if ( use_N_ev <= 0 ) use_N_ev = 1; // select the number of Arnoldi vectors to generate int NCV = 2*use_N_ev + 1; if ( NCV > N ) NCV = N; // allocate temporary storage for the vectors float_complex* temp_ev = (float_complex*)malloc ( NCV*sizeof(float_complex) ); float* temp_vectors = (float*) malloc (N*NCV*sizeof(float)); float* temp_residuals = (float*)malloc ( (NCV )*sizeof(float)); float* residuals = (float*)malloc ( (NCV)*sizeof(float)); #ifdef DEBUG_OUTPUT printf( "N=%d, use_N_ev = %d, NCV = %d, NEV = %d\n", N, use_N_ev, NCV, NEV ); #endif arpack_ssev( (int*)&N, (void*)DATA, 
&use_N_ev, &NCV, (float*)temp_ev, temp_vectors, temp_residuals, "LR" ); // free the temporary storage int largest_index[NCV]; float real[NCV]; float_complex v; float_complex l; int i; for ( i = 0; i < NCV; i++ ) { v = temp_ev[i]; l = invert (v); #ifdef DEBUG_OUTPUT printf("%d: %f+j%f :: %f(%f)+j%f :: %e\n", i, v.x, v.y, l.x, l.x + shift, l.y, temp_residuals[i]); #endif real[i] = v.x; insert_largest_into_list ( real, largest_index, NCV, real[i], i ); } int index, j = 0; i = 0; while ( i < NEV && j < NCV ) { index = largest_index[j]; if ( 0 <= index && index < NCV ) { v = temp_ev[index]; l = invert(v); float_complex t = invert(l); if ( temp_residuals[index] > 1e-9 && temp_residuals[index] < 0.5 && t.x > 1e-5 ) { eigenvalues[2*i] = l.x + shift; eigenvalues[2*i+1] = l.y; residuals[i] = temp_residuals[index]; memcpy ( eigenvectors + N*i, temp_vectors + N*index, N*sizeof(float) ); #ifdef DEBUG_OUTPUT printf("using: %d : %f+j%f : %e+j%e : %e (%f +j%f)\n", index, v.x, v.y, l.x, l.y, residuals[i], t.x, t.y ); #endif i++; } } j++; } free ( temp_ev ); free ( temp_vectors ); free ( temp_residuals ); free ( residuals ); }
FGP_TV_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2019 Daniil Kazantsev
 * Copyright 2019 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "FGP_TV_core.h"

/* C-OMP implementation of FGP-TV [1] denoising/regularization model (2D/3D case)
 *
 * Input Parameters:
 * 1. Noisy image/volume
 * 2. lambdaPar - regularization parameter
 * 3. Number of iterations
 * 4. eplsilon: tolerance constant
 * 5. TV-type: methodTV - 'iso' (0) or 'l1' (1)
 * 6. nonneg: 'nonnegativity (0 is OFF by default)
 *
 * Output:
 * [1] Filtered/regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * This function is based on the Matlab's code and paper by
 * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for
 *     Constrained Total Variation Image Denoising and Deblurring Problems"
 */
float TV_FGP_CPU_main(float *Input, float *Output, float *infovector, float lambdaPar, int iterationsNumb, float epsil, int methodTV, int nonneg, int dimX, int dimY, int dimZ)
{
    int ll;
    long j, DimTotal;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    float tk = 1.0f;        /* FISTA momentum parameters */
    float tkp1 = 1.0f;
    int count = 0;          /* consecutive convergence hits (stop after 4) */

    if (dimZ <= 1) {
        /* 2D case */
        float *Output_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL;
        /* fix: widen BEFORE multiplying - (long)(dimX*dimY) overflowed in
           int arithmetic for large images */
        DimTotal = (long)dimX*(long)dimY;

        if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
        P1 = calloc(DimTotal, sizeof(float));
        P2 = calloc(DimTotal, sizeof(float));
        P1_prev = calloc(DimTotal, sizeof(float));
        P2_prev = calloc(DimTotal, sizeof(float));
        R1 = calloc(DimTotal, sizeof(float));
        R2 = calloc(DimTotal, sizeof(float));

        /* begin iterations */
        for(ll=0; ll<iterationsNumb; ll++) {

            /* snapshot every 5th iterate for the relative-change test */
            if ((epsil != 0.0f) && (ll % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);

            /* computing the gradient of the objective function */
            Obj_func2D(Input, Output, R1, R2, lambdaPar, (long)(dimX), (long)(dimY));

            /* apply nonnegativity */
            if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}

            /* taking a step towards minus of the gradient */
            Grad_func2D(P1, P2, Output, R1, R2, lambdaPar, (long)(dimX), (long)(dimY));

            /* projection step */
            Proj_func2D(P1, P2, methodTV, DimTotal);

            /* updating R and t (FISTA acceleration) */
            tkp1 = (1.0f + sqrtf(1.0f + 4.0f*tk*tk))*0.5f;
            Rupd_func2D(P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, DimTotal);

            /* storing old values */
            copyIm(P1, P1_prev, (long)(dimX), (long)(dimY), 1l);
            copyIm(P2, P2_prev, (long)(dimX), (long)(dimY), 1l);
            tk = tkp1;

            /* check early stopping criteria: relative L2 change < epsil */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++)
                {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                if (re < epsil)  count++;
                if (count > 3) break;
            }
        }
        if (epsil != 0.0f) free(Output_prev);
        free(P1); free(P2); free(P1_prev); free(P2_prev); free(R1); free(R2);
    }
    else {
        /* 3D case */
        float *Output_prev=NULL, *P1=NULL, *P2=NULL, *P3=NULL, *P1_prev=NULL, *P2_prev=NULL, *P3_prev=NULL, *R1=NULL, *R2=NULL, *R3=NULL;
        /* fix: widen BEFORE multiplying to avoid int overflow for volumes */
        DimTotal = (long)dimX*(long)dimY*(long)dimZ;

        if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
        P1 = calloc(DimTotal, sizeof(float));
        P2 = calloc(DimTotal, sizeof(float));
        P3 = calloc(DimTotal, sizeof(float));
        P1_prev = calloc(DimTotal, sizeof(float));
        P2_prev = calloc(DimTotal, sizeof(float));
        P3_prev = calloc(DimTotal, sizeof(float));
        R1 = calloc(DimTotal, sizeof(float));
        R2 = calloc(DimTotal, sizeof(float));
        R3 = calloc(DimTotal, sizeof(float));

        /* begin iterations */
        for(ll=0; ll<iterationsNumb; ll++) {

            if ((epsil != 0.0f) && (ll % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* computing the gradient of the objective function */
            Obj_func3D(Input, Output, R1, R2, R3, lambdaPar, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* apply nonnegativity */
            if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (Output[j] < 0.0f) Output[j] = 0.0f;}

            /* taking a step towards minus of the gradient */
            Grad_func3D(P1, P2, P3, Output, R1, R2, R3, lambdaPar, (long)(dimX), (long)(dimY), (long)(dimZ));

            /* projection step */
            Proj_func3D(P1, P2, P3, methodTV, DimTotal);

            /* updating R and t (FISTA acceleration) */
            tkp1 = (1.0f + sqrtf(1.0f + 4.0f*tk*tk))*0.5f;
            Rupd_func3D(P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, DimTotal);

            /* calculate norm - stopping rules (checked before storing old
               values; order preserved from the original implementation) */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++)
                {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                /* stop if the norm residual is less than the tolerance EPS */
                if (re < epsil)  count++;
                if (count > 3) break;
            }

            /* storing old values */
            copyIm(P1, P1_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            copyIm(P2, P2_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            copyIm(P3, P3_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            tk = tkp1;
        }
        if (epsil != 0.0f) free(Output_prev);
        free(P1); free(P2); free(P3); free(P1_prev); free(P2_prev); free(P3_prev); free(R1); free(R2); free(R3);
    }

    /* adding info into info_vector */
    infovector[0] = (float)(ll); /* iterations number (if stopped earlier based on tolerance) */
    infovector[1] = re;          /* reached tolerance */
    return 0;
}

/* D = A - lambda*div(R): gradient of the data term with backward-difference
 * divergence of the dual field (R1,R2); zero (Neumann) boundary handling. */
float Obj_func2D(float *A, float *D, float *R1, float *R2, float lambda, long dimX, long dimY)
{
    float val1, val2;
    long i,j,index;
#pragma omp parallel for shared(A,D,R1,R2) private(index,i,j,val1,val2)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* boundary conditions */
            if (i == 0) {val1 = 0.0f;} else {val1 = R1[j*dimX + (i-1)];}
            if (j == 0) {val2 = 0.0f;} else {val2 = R2[(j-1)*dimX + i];}
            D[index] = A[index] - lambda*(R1[index] + R2[index] - val1 - val2);
        }}
    return *D;
}

/* P = R + (1/(8*lambda)) * grad(D): forward-difference gradient step on the
 * dual variables (1/8 is the 2D Lipschitz-constant step size). */
float Grad_func2D(float *P1, float *P2, float *D, float *R1, float *R2, float lambda, long dimX, long dimY)
{
    float val1, val2, multip;
    long i,j,index;
    multip = (1.0f/(8.0f*lambda));
#pragma omp parallel for shared(P1,P2,D,R1,R2,multip) private(index,i,j,val1,val2)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            index = j*dimX+i;
            /* boundary conditions */
            if (i == dimX-1) val1 = 0.0f; else val1 = D[index] - D[j*dimX + (i+1)];
            if (j == dimY-1) val2 = 0.0f; else val2 = D[index] - D[(j+1)*dimX + i];
            P1[index] = R1[index] + multip*val1;
            P2[index] = R2[index] + multip*val2;
        }}
    return 1;
}

/* R = P + ((tk-1)/tkp1) * (P - P_old): FISTA momentum update of the duals. */
float Rupd_func2D(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, long DimTotal)
{
    long i;
    float multip;
    multip = ((tk-1.0f)/tkp1);
#pragma omp parallel for shared(P1,P2,P1_old,P2_old,R1,R2,multip) private(i)
    for(i=0; i<DimTotal; i++) {
        R1[i] = P1[i] + multip*(P1[i] - P1_old[i]);
        R2[i] = P2[i] + multip*(P2[i] - P2_old[i]);
    }
    return 1;
}

/* 3D-case related Functions */
/*****************************************************************/

/* 3D analogue of Obj_func2D with a third dual component R3 along Z. */
float Obj_func3D(float *A, float *D, float *R1, float *R2, float *R3, float lambda, long dimX, long dimY, long dimZ)
{
    float val1, val2, val3;
    long i,j,k,index;
#pragma omp parallel for shared(A,D,R1,R2,R3) private(index,i,j,k,val1,val2,val3)
    for(k=0; k<dimZ; k++) {
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* boundary conditions */
                if (i == 0) {val1 = 0.0f;} else {val1 = R1[(dimX*dimY)*k + j*dimX + (i-1)];}
                if (j == 0) {val2 = 0.0f;} else {val2 = R2[(dimX*dimY)*k + (j-1)*dimX + i];}
                if (k == 0) {val3 = 0.0f;} else {val3 = R3[(dimX*dimY)*(k-1) + j*dimX + i];}
                D[index] = A[index] - lambda*(R1[index] + R2[index] + R3[index] - val1 - val2 - val3);
            }}}
    return *D;
}

/* 3D analogue of Grad_func2D; 1/26 is the 3D step size used here. */
float Grad_func3D(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float lambda, long dimX, long dimY, long dimZ)
{
    float val1, val2, val3, multip;
    long i,j,k, index;
    multip = (1.0f/(26.0f*lambda));
#pragma omp parallel for shared(P1,P2,P3,D,R1,R2,R3,multip) private(index,i,j,k,val1,val2,val3)
    for(k=0; k<dimZ; k++) {
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* boundary conditions */
                if (i == dimX-1) val1 = 0.0f; else val1 = D[index] - D[(dimX*dimY)*k + j*dimX + (i+1)];
                if (j == dimY-1) val2 = 0.0f; else val2 = D[index] - D[(dimX*dimY)*k + (j+1)*dimX + i];
                if (k == dimZ-1) val3 = 0.0f; else val3 = D[index] - D[(dimX*dimY)*(k+1) + j*dimX + i];
                P1[index] = R1[index] + multip*val1;
                P2[index] = R2[index] + multip*val2;
                P3[index] = R3[index] + multip*val3;
            }}}
    return 1;
}

/* 3D analogue of Rupd_func2D (FISTA momentum on three dual components). */
float Rupd_func3D(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, long DimTotal)
{
    long i;
    float multip;
    multip = ((tk-1.0f)/tkp1);
#pragma omp parallel for shared(P1,P2,P3,P1_old,P2_old,P3_old,R1,R2,R3,multip) private(i)
    for(i=0; i<DimTotal; i++) {
        R1[i] = P1[i] + multip*(P1[i] - P1_old[i]);
        R2[i] = P2[i] + multip*(P2[i] - P2_old[i]);
        R3[i] = P3[i] + multip*(P3[i] - P3_old[i]);
    }
    return 1;
}
symm_x_dia_u_lo_row_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/* Dense-block SpMM for a symmetric (lower triangle stored, unit diagonal,
 * conjugated) DIA matrix, row-major multi-vector layout:
 *
 *     Y = beta*Y + alpha*op(A)*X
 *
 * The unit diagonal contributes alpha*X directly; each stored sub-diagonal
 * contributes both its own entry and its mirrored (transposed) entry.
 * Complex builds only; real builds report an invalid-value status.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
#ifdef COMPLEX
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Pass 1: Y = beta*Y + alpha*X (scaling plus the unit-diagonal term). */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT row = 0; row < mat->rows; row++)
    {
        for (ALPHA_INT col = 0; col < columns; col++)
        {
            alpha_mul(y[index2(row, col, ldy)], y[index2(row, col, ldy)], beta);
            alpha_madde(y[index2(row, col, ldy)], x[index2(row, col, ldx)], alpha);
        }
    }

    /* Pass 2: accumulate the stored sub-diagonals; each thread owns a
       contiguous slice of the output columns, so no write conflicts. */
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid    = alpha_get_thread_id();
        ALPHA_INT col_lo = cross_block_low(tid, num_threads, columns);
        ALPHA_INT col_hi = cross_block_high(tid, num_threads, columns);

        for (ALPHA_INT diag = 0; diag < mat->ndiag; ++diag)
        {
            ALPHA_INT dist = mat->distance[diag];
            if (dist >= 0)
                continue;   /* only the lower triangle is stored */

            ALPHA_INT row_start = alpha_max(0, -dist);
            ALPHA_INT col_start = alpha_max(0, dist);
            ALPHA_INT diag_len  = alpha_min(mat->rows - row_start, mat->cols - col_start);

            for (ALPHA_INT k = 0; k < diag_len; ++k)
            {
                ALPHA_INT ar = row_start + k;
                ALPHA_INT ac = col_start + k;

                /* conjugated matrix entry pre-scaled by alpha */
                ALPHA_Number scaled;
                alpha_mul_2c(scaled, mat->values[index2(diag, ar, mat->lval)], alpha);

                for (ALPHA_INT cc = col_lo; cc < col_hi; ++cc)
                {
                    /* stored entry and its symmetric mirror */
                    alpha_madde(y[index2(ar, cc, ldy)], scaled, x[index2(ac, cc, ldx)]);
                    alpha_madde(y[index2(ac, cc, ldy)], scaled, x[index2(ar, cc, ldx)]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
#else
    return ALPHA_SPARSE_STATUS_INVALID_VALUE;
#endif
}
10.norace3.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> int main() { int x = 100, y = 200; #pragma omp parallel num_threads(8) { #pragma omp sections firstprivate(x) private(y) { { y = x * 3; } #pragma omp section { y = 4 * x; x = y - x; } } } return 0; } // CHECK: Region is Data Race Free. // END
GB_unop__identity_uint16_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint16_uint64)
// op(A') function:  GB (_unop_tran__identity_uint16_uint64)

// C type:   uint16_t
// A type:   uint64_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

// NOTE: although the operator is "identity", applying it still performs the
// uint64 -> uint16 typecast (truncation), which is the real work done here.

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = (uint16_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint16_uint64)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            uint64_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint16_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared; it uses the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
clim_img_bitmap.h
#ifndef CLIM_IMG_BITMAP_H #define CLIM_IMG_BITMAP_H #include "clim_platform_detector.h" #ifdef CLIM_COMPILER_MSVC #pragma once #endif #include <stdlib.h> #include <math.h> #include "clim_base.h" #include "clim_utils.h" #if defined(CLIM_COMPILER_GCC) || defined(CLIM_COMPILER_CLANG) typedef struct { uint16_t signature; uint32_t filesize; uint32_t reserved; uint32_t pixel_data_offset; } __attribute__((packed)) clim_bitmap_file_header_t; typedef struct { uint32_t size; int32_t width; int32_t height; uint16_t planes; uint16_t bits_per_pixel; uint32_t compression; uint32_t img_size; int32_t x_pixels_per_meter; int32_t y_pixels_per_meter; uint32_t colors_used; uint32_t colors_important; } __attribute__((packed)) clim_bitmap_info_header_t; #elif CLIM_COMPILER_MSVC #pragma pack(push, 1) typedef struct { uint16_t signature; uint32_t filesize; uint32_t reserved; uint32_t pixel_data_offset; } clim_bitmap_file_header_t; #pragma pack(pop) #pragma pack(push, 1) typedef struct { uint32_t size; int32_t width; int32_t height; uint16_t planes; uint16_t bits_per_pixel; uint32_t compression; uint32_t img_size; int32_t x_pixels_per_meter; int32_t y_pixels_per_meter; uint32_t colors_used; uint32_t colors_important; } clim_bitmap_info_header_t; #pragma pack(pop) #endif #define CLIM_BMP_ABS(x) (((x) < 0) ? 
(-x) : (x)) clim_errcode_t clim_img_bmp_load(const uint8_t* buffer, const size_t buffer_len, clim_img_ctx_t* pctx) { if (!buffer) return CLIM_EC_INVALID_PARAMETERS; // TODO(Garcia): Type Punning UB if (*(const uint16_t *)buffer != 0x4D42) return CLIM_EC_INVALID_BITMAP_FILE; pctx->format = CLIM_IMAGE_FORMAT_BMP; const uint8_t* save_point = buffer; buffer += offsetof(clim_bitmap_file_header_t, pixel_data_offset); uint8_t pixel_offset = *buffer; clim_bitmap_info_header_t info_header = { 0 }; memcpy(&info_header, save_point + sizeof(clim_bitmap_file_header_t), sizeof(clim_bitmap_info_header_t)); pctx->data.width = (uint32_t)(CLIM_BMP_ABS(info_header.width)); pctx->data.height = (uint32_t)(CLIM_BMP_ABS(info_header.height)); pctx->data.bytes_per_pixel = (uint8_t) (info_header.bits_per_pixel >> 3U); const uint8_t bpp = pctx->data.bytes_per_pixel; if (bpp != 3 && bpp != 4) return CLIM_EC_UNSUPPORTED_BITMAP_BPP; buffer = save_point + pixel_offset; const size_t rowsize = pctx->data.width * pctx->data.bytes_per_pixel; const uint8_t padd = ((4U - (rowsize & 3U)) & 3U); const uint8_t mem_bpp = sizeof(clim_pixelcolor_t); const size_t mem_len = pctx->data.height * pctx->data.width * mem_bpp; uint8_t* img_pixels = (uint8_t *) clim_mem_alloc(mem_len, false); const bool is_32_bits_per_pixel = (bpp == 4U); uint32_t width = pctx->data.width; uint32_t height = pctx->data.height; // #pragma omp parallel for for (uint32_t y = 0; y < height; ++y) { for (uint32_t x = 0; x < width; ++x) { img_pixels[mem_bpp * (y * width + x) + 3U] = *buffer++ & UINT8_MAX; img_pixels[mem_bpp * (y * width + x) + 2U] = *buffer++ & UINT8_MAX; img_pixels[mem_bpp * (y * width + x) + 1U] = *buffer++ & UINT8_MAX; img_pixels[mem_bpp * (y * width + x) + 0U] = is_32_bits_per_pixel ? 
(*buffer++ & UINT8_MAX) : (0xffu); } if (!is_32_bits_per_pixel) buffer += padd; } pctx->data.pixels = img_pixels; return CLIM_EC_SUCCESS; } clim_errcode_t clim_img_bmp_write(const char* filepath, const clim_img_ctx_t* pctx) { CLIM_ASSERT(pctx && filepath); if (pctx->data.bytes_per_pixel != 3 && pctx->data.bytes_per_pixel != 4) return CLIM_EC_UNSUPPORTED_BITMAP_BPP; const uint32_t rowsize = pctx->data.width * pctx->data.bytes_per_pixel; const uint8_t padd = ((4 - (rowsize & 3)) & 3); const size_t len = pctx->data.height * (rowsize + padd); clim_bitmap_file_header_t ctx_header_file = { .signature = 0x4D42, .filesize = (uint32_t)(len + sizeof(clim_bitmap_file_header_t) + sizeof(clim_bitmap_info_header_t)), .pixel_data_offset = (uint32_t)(sizeof(clim_bitmap_file_header_t) + sizeof(clim_bitmap_info_header_t)) }; clim_bitmap_info_header_t ctx_header_info = { .size = sizeof(clim_bitmap_info_header_t), .width = (int32_t)(pctx->data.width), .height = (int32_t)(pctx->data.height), .bits_per_pixel = (uint16_t)(pctx->data.bytes_per_pixel << 3), .img_size = (uint32_t)(len), .planes = 1U }; FILE* file_image = fopen(filepath, "wb"); if (!file_image) return CLIM_EC_CANNOT_WRITE_FILE; fwrite(&ctx_header_file, sizeof(ctx_header_file), 1, file_image); fwrite(&ctx_header_info, sizeof(ctx_header_info), 1, file_image); const uint8_t bpp = pctx->data.bytes_per_pixel; const uint8_t mem_bpp = sizeof(clim_pixelcolor_t); uint8_t* pixels = pctx->data.pixels; const uint32_t height = pctx->data.height; const uint32_t width = pctx->data.width; if (bpp == 4U) { clim_bgr2rgb(pixels, mem_bpp, pixels + (pctx->data.width * pctx->data.height * mem_bpp)); // TODO(Garcia): Ensure return write size fwrite(pctx->data.pixels, len, sizeof(uint8_t), file_image); } else { // #pragma omp parallel for for (uint32_t y = 0U; y < height; ++y) { for (uint32_t x = 0U; x < width; ++x) { fputc(pixels[mem_bpp * (y * width + x) + 3U], file_image); // B fputc(pixels[mem_bpp * (y * width + x) + 2U], file_image); // G 
fputc(pixels[mem_bpp * (y * width + x) + 1U], file_image); // R } fwrite(&(uint8_t){0x0}, sizeof(uint8_t), padd, file_image); } } clim_fclose(file_image); return CLIM_EC_SUCCESS; } #endif
GB_subassign_01.c
//------------------------------------------------------------------------------
// GB_subassign_01: C(I,J) = scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 01: C(I,J) = scalar ; using S

// M:           NULL
// Mask_comp:   false
// C_replace:   false
// accum:       NULL
// A:           scalar
// S:           constructed

#include "GB_subassign_methods.h"

// Assign a single scalar to the C(I,J) submatrix.  Existing entries of C in
// the pattern are overwritten in place (phase 1); positions with no existing
// entry become pending tuples to be assembled later (phase 2).  All heavy
// lifting (slicing, lookups, zombie/pending bookkeeping, final return) is
// done by the GB_* macros from GB_subassign_methods.h.
GrB_Info GB_subassign_01
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const void *scalar,
    const GrB_Type atype,
    const GrB_Matrix S,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;
    const bool C_is_hyper = C->is_hyper ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_SCALAR ;
    GB_GET_S ;
    const int64_t *restrict Sh = S->h ;
    const int64_t Snvec = S->nvec ;
    const bool S_is_hyper = S->is_hyper ;
    GrB_BinaryOp accum = NULL ;     // no accumulator for Method 01

    //--------------------------------------------------------------------------
    // Method 01: C(I,J) = scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal; must visit all IxJ, so Omega(|I|*|J|) is required.

    // Entries in S are found and the corresponding entry in C replaced with
    // the scalar.  The traversal of S is identical to the traversal of M in
    // Method 4.

    // Method 01 and Method 03 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) = scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (Si [pS] == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    // just counted in phase 1; inserted in phase 2
                    task_pending++ ;
                }
                else
                {
                    // ----[C A 1] or [X A 1]-----------------------------------
                    // both S (i,j) and A (i,j) present
                    // [C A 1]: action: ( =A ): scalar to C, no accum
                    // [X A 1]: action: ( undelete ): zombie lives
                    GB_C_S_LOOKUP ;
                    GB_noaccum_C_A_1_scalar ;
                    GB_NEXT (S) ;
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) = scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (Si [pS] == iA) ;
                if (!found)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (scalar) ;
                }
                else
                {
                    // both S (i,j) and A (i,j) present
                    // (already handled in phase 1; just advance through S)
                    GB_NEXT (S) ;
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
ExtraRocksDBController.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_SPANN_EXTRADBSEARCHER_H_ #define _SPTAG_SPANN_EXTRADBSEARCHER_H_ #include "inc/Helper/VectorSetReader.h" #include "inc/Helper/AsyncFileReader.h" #include "IExtraSearcher.h" #include "ExtraFullGraphSearcher.h" #include "../Common/TruthSet.h" #include "inc/Helper/KeyValueIO.h" #include "rocksdb/db.h" #include "rocksdb/slice.h" #include "rocksdb/options.h" #include "rocksdb/merge_operator.h" #include <map> #include <cmath> #include <climits> #include <future> namespace SPTAG::SPANN { inline bool sort_docid_cmp(const Edge& a, const Edge& b) { return a.tonode < b.tonode; } class RocksDBIO : public Helper::KeyValueIO { public: RocksDBIO() = default; ~RocksDBIO() override { db->Close(); DestroyDB(dbPath, dbOptions); delete db; } bool Initialize(const char* filePath) override { dbPath = std::string(filePath); dbOptions.create_if_missing = true; dbOptions.IncreaseParallelism(); dbOptions.OptimizeLevelStyleCompaction(); dbOptions.merge_operator.reset(new AnnMergeOperator); auto s = rocksdb::DB::Open(dbOptions, dbPath, &db); LOG(Helper::LogLevel::LL_Info, "SPFresh: New Rocksdb: %s\n", filePath); return s == rocksdb::Status::OK(); } void ShutDown() override { db->Close(); DestroyDB(dbPath, dbOptions); delete db; } ErrorCode Get(const std::string& key, std::string* value) override { auto s = db->Get(rocksdb::ReadOptions(), key, value); if (s == rocksdb::Status::OK()) { return ErrorCode::Success; } else { return ErrorCode::Fail; } } ErrorCode Get(SizeType key, std::string* value) override { return Get(Helper::Convert::Serialize<SizeType>(&key), value); } ErrorCode Put(const std::string& key, const std::string& value) override { auto s = db->Put(rocksdb::WriteOptions(), key, value); if (s == rocksdb::Status::OK()) { return ErrorCode::Success; } else { return ErrorCode::Fail; } } ErrorCode Put(SizeType key, const std::string& value) override { return 
Put(Helper::Convert::Serialize<SizeType>(&key), value); } ErrorCode Put(SizeType key, SizeType id, const void* vector, SizeType dim) override { using Helper::Convert::Serialize; std::string posting(Serialize<SizeType>(&id) + Serialize<SizeType>(vector, dim)); return Put(key, posting); } class AnnMergeOperator : public rocksdb::AssociativeMergeOperator { public: bool Merge(const rocksdb::Slice& key, const rocksdb::Slice* existing_value, const rocksdb::Slice& value, std::string* new_value, rocksdb::Logger* logger) const override { std::string newPosting; if(existing_value) { newPosting += (*existing_value).ToString(); newPosting += value.ToString(); } else { newPosting += value.ToString(); } *new_value = newPosting; return true; } const char* Name() const override { return "AnnMergeOperator"; } }; ErrorCode Merge(SizeType key, const std::string& value) { if (value.empty()) { LOG(Helper::LogLevel::LL_Error, "Error! empty append posting!\n"); } auto s = db->Merge(rocksdb::WriteOptions(), Helper::Convert::Serialize<int>(&key, 1), value); if (s == rocksdb::Status::OK()) { return ErrorCode::Success; } else { return ErrorCode::Fail; } } ErrorCode Delete(SizeType key) override { auto s = db->Delete(rocksdb::WriteOptions(), Helper::Convert::Serialize<int>(&key, 1)); if (s == rocksdb::Status::OK()) { return ErrorCode::Success; } else { return ErrorCode::Fail; } } void ForceCompaction() { LOG(Helper::LogLevel::LL_Info, "Start Compaction\n"); db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr); LOG(Helper::LogLevel::LL_Info, "Finish Compaction\n"); } private: std::string dbPath; rocksdb::DB* db{}; rocksdb::Options dbOptions; }; template <typename ValueType> class ExtraRocksDBController : public IExtraSearcher { private: RocksDBIO db; std::atomic_uint64_t m_postingNum{}; public: ExtraRocksDBController(const char* dbPath, int dim) { db.Initialize(dbPath); m_vectorInfoSize = dim * sizeof(ValueType) + sizeof(int);} ~ExtraRocksDBController() override = default; bool 
// Loads the on-disk SSD index. The body is currently disabled: postings are
// served from the key-value store (db) instead of memory-mapped index files,
// so this is a no-op that always reports success.
// NOTE(review): the return type is declared on the preceding (unseen) line.
LoadIndex(Options& p_opt) override {
    /* m_extraFullGraphFile = p_opt.m_indexDirectory + FolderSep + p_opt.m_ssdIndex;
    std::string curFile = m_extraFullGraphFile;
    do {
        auto curIndexFile = f_createAsyncIO();
        if (curIndexFile == nullptr || !curIndexFile->Initialize(curFile.c_str(), std::ios::binary | std::ios::in,
#ifdef BATCH_READ
            p_opt.m_searchInternalResultNum, 2, 2, p_opt.m_iSSDNumberOfThreads
#else
            p_opt.m_searchInternalResultNum * p_opt.m_iSSDNumberOfThreads / p_opt.m_ioThreads + 1, 2, 2, p_opt.m_ioThreads
#endif
        )) {
            LOG(Helper::LogLevel::LL_Error, "Cannot open file:%s!\n", curFile.c_str());
            return false;
        }
        m_indexFiles.emplace_back(curIndexFile);
        m_listInfos.emplace_back(0);
        m_totalListCount += LoadingHeadInfo(curFile, p_opt.m_searchPostingPageLimit, m_listInfos.back());
        curFile = m_extraFullGraphFile + "_" + std::to_string(m_indexFiles.size());
    } while (fileexists(curFile.c_str()));
    m_listPerFile = static_cast<int>((m_totalListCount + m_indexFiles.size() - 1) / m_indexFiles.size());
#ifndef _MSC_VER
    Helper::AIOTimeout.tv_nsec = p_opt.m_iotimeout * 1000;
#endif */
    return true;
}

// Scans the posting lists named in p_exWorkSpace->m_postingIDs, scores every
// live (not deleted, not yet seen) vector against the query, and accumulates
// the nearest candidates into p_queryResults.
//   p_exWorkSpace - carries the posting IDs to visit and the per-query deduper.
//   p_queryResults - result set; updated in place via AddPoint.
//   p_index       - head index used only for distance computation.
//   p_stats       - optional; receives element/IO counters when non-null.
//   m_deletedID   - bitset of tombstoned vector IDs to skip.
//   truth/found   - optional recall bookkeeping: records which ground-truth
//                   IDs were encountered in which posting list.
virtual void SearchIndex(ExtraWorkSpace* p_exWorkSpace,
    QueryResult& p_queryResults,
    std::shared_ptr<VectorIndex> p_index,
    SearchStats* p_stats, const COMMON::Labelset& m_deletedID,
    std::set<int>* truth, std::map<int, std::set<int>>* found) override
{
    const auto postingListCount = static_cast<uint32_t>(p_exWorkSpace->m_postingIDs.size());

    p_exWorkSpace->m_deduper.clear();

    COMMON::QueryResultSet<ValueType>& queryResults = *((COMMON::QueryResultSet<ValueType>*)&p_queryResults);

    int diskRead = 0;
    int diskIO = 0;
    int listElements = 0;

    for (uint32_t pi = 0; pi < postingListCount; ++pi) {
        auto curPostingID = p_exWorkSpace->m_postingIDs[pi];
        std::string postingList;
        // Fetch the serialized posting list from the KV store (overload below).
        SearchIndex(curPostingID, postingList);

        // Each record is [int vectorID][raw vector bytes], m_vectorInfoSize total.
        int vectorNum = postingList.size() / m_vectorInfoSize;

        diskIO++;
        diskRead++;
        listElements += vectorNum;

        for (int i = 0; i < vectorNum; i++) {
            char* vectorInfo = postingList.data() + i * m_vectorInfoSize;
            int vectorID = *(reinterpret_cast<int*>(vectorInfo));
            // Skip tombstoned vectors and vectors already scored for this query.
            if (m_deletedID.Contains(vectorID) || p_exWorkSpace->m_deduper.CheckAndSet(vectorID)) continue;
            auto distance2leaf = p_index->ComputeDistance(queryResults.GetQuantizedTarget(), vectorInfo + sizeof(int));
            queryResults.AddPoint(vectorID, distance2leaf);
        }

        // Optional recall accounting: note every ground-truth ID seen in this list.
        if (truth) {
            for (int i = 0; i < vectorNum; ++i) {
                char* vectorInfo = postingList.data() + i * m_vectorInfoSize;
                int vectorID = *(reinterpret_cast<int*>(vectorInfo));
                if (truth->count(vectorID) != 0)
                    (*found)[curPostingID].insert(vectorID);
            }
        }
    }

    if (p_stats) {
        p_stats->m_totalListElementsCount = listElements;
        p_stats->m_diskIOCount = diskIO;
        p_stats->m_diskAccessCount = diskRead;
    }
}

// Builds the SSD posting-list index:
//  1. reads the head-vector ID translation file,
//  2. for each batch, runs ApproximateRNG to assign every full vector to up to
//     m_replicaCount head postings ("selections"),
//  3. sorts selections, truncates oversized postings to the page limit,
//  4. serializes each posting ([id][vector] records) into the KV store, and
//  5. writes the SSDIndexInfo metadata file plus an empty delete map.
// Returns false on recoverable setup errors; calls exit(1) on I/O failures
// mid-build (matches the surrounding codebase's convention).
bool BuildIndex(std::shared_ptr<Helper::VectorSetReader>& p_reader, std::shared_ptr<VectorIndex> p_headIndex, Options& p_opt) override {
    std::string outputFile = p_opt.m_indexDirectory + FolderSep + p_opt.m_ssdIndex;
    if (outputFile.empty())
    {
        LOG(Helper::LogLevel::LL_Error, "Output file can't be empty!\n");
        return false;
    }

    int numThreads = p_opt.m_iSSDNumberOfThreads;
    int candidateNum = p_opt.m_internalResultNum;
    std::unordered_set<SizeType> headVectorIDS;
    if (p_opt.m_headIDFile.empty()) {
        LOG(Helper::LogLevel::LL_Error, "Not found VectorIDTranslate!\n");
        return false;
    }

    // Load the set of head vector IDs (raw little-endian uint64 stream).
    {
        auto ptr = SPTAG::f_createIO();
        if (ptr == nullptr || !ptr->Initialize((p_opt.m_indexDirectory + FolderSep + p_opt.m_headIDFile).c_str(), std::ios::binary | std::ios::in)) {
            LOG(Helper::LogLevel::LL_Error, "failed open VectorIDTranslate: %s\n", p_opt.m_headIDFile.c_str());
            return false;
        }

        std::uint64_t vid;
        while (ptr->ReadBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) == sizeof(vid))
        {
            headVectorIDS.insert(static_cast<SizeType>(vid));
        }
        LOG(Helper::LogLevel::LL_Info, "Loaded %u Vector IDs\n", static_cast<uint32_t>(headVectorIDS.size()));
    }

    SizeType fullCount = 0;
    size_t vectorInfoSize = 0;
    {
        auto fullVectors = p_reader->GetVectorSet();
        fullCount = fullVectors->Count();
        // Per-record size on disk: 4-byte vector ID followed by the raw vector.
        vectorInfoSize = fullVectors->PerVectorDataSize() + sizeof(int);
    }

    // One selection slot per (vector, replica) pair; may be disk-backed via tmpdir.
    Selection selections(static_cast<size_t>(fullCount) * p_opt.m_replicaCount, p_opt.m_tmpdir);
    LOG(Helper::LogLevel::LL_Info, "Full vector count:%d Edge bytes:%llu selection size:%zu, capacity size:%zu\n", fullCount, sizeof(Edge), selections.m_selections.size(), selections.m_selections.capacity());
    std::vector<std::atomic_int> replicaCount(fullCount);
    std::vector<std::atomic_int> postingListSize(headVectorIDS.size());
    for (auto& pls : postingListSize) pls = 0;
    std::unordered_set<SizeType> emptySet;
    SizeType batchSize = (fullCount + p_opt.m_batches - 1) / p_opt.m_batches;

    auto t1 = std::chrono::high_resolution_clock::now();
    if (p_opt.m_batches > 1) selections.SaveBatch();
    {
        LOG(Helper::LogLevel::LL_Info, "Preparation done, start candidate searching.\n");
        SizeType sampleSize = p_opt.m_samples;
        std::vector<SizeType> samples(sampleSize, 0);
        for (int i = 0; i < p_opt.m_batches; i++) {
            SizeType start = i * batchSize;
            SizeType end = min(start + batchSize, fullCount);
            auto fullVectors = p_reader->GetVectorSet(start, end);
            if (p_opt.m_distCalcMethod == DistCalcMethod::Cosine && !p_reader->IsNormalized()) fullVectors->Normalize(p_opt.m_iSSDNumberOfThreads);

            emptySet.clear();

            // Sample non-head vectors of this batch to measure head-index recall.
            int sampleNum = 0;
            for (int j = start; j < end && sampleNum < sampleSize; j++)
            {
                if (headVectorIDS.count(j) == 0) samples[sampleNum++] = j - start;
            }

            float acc = 0;
#pragma omp parallel for schedule(dynamic)
            for (int j = 0; j < sampleNum; j++)
            {
                COMMON::Utils::atomic_float_add(&acc, COMMON::TruthSet::CalculateRecall(p_headIndex.get(), fullVectors->GetVector(samples[j]), candidateNum));
            }
            acc = acc / sampleNum;
            LOG(Helper::LogLevel::LL_Info, "Batch %d vector(%d,%d) loaded with %d vectors (%zu) HeadIndex acc @%d:%f.\n", i, start, end, fullVectors->Count(), selections.m_selections.size(), candidateNum, acc);

            // Assign each vector of the batch to up to m_replicaCount head postings.
            p_headIndex->ApproximateRNG(fullVectors, emptySet, candidateNum, selections.m_selections.data(), p_opt.m_replicaCount, numThreads, p_opt.m_gpuSSDNumTrees, p_opt.m_gpuSSDLeafSize, p_opt.m_rngFactor, p_opt.m_numGPUs);

            // Tally posting sizes; INT_MAX node marks an unused replica slot.
            for (SizeType j = start; j < end; j++)
            {
                replicaCount[j] = 0;
                size_t vecOffset = j * (size_t)p_opt.m_replicaCount;
                for (int resNum = 0; resNum < p_opt.m_replicaCount && selections[vecOffset + resNum].node != INT_MAX; resNum++)
                {
                    ++postingListSize[selections[vecOffset + resNum].node];
                    selections[vecOffset + resNum].tonode = j;
                    ++replicaCount[j];
                }
            }

            if (p_opt.m_batches > 1) selections.SaveBatch();
        }
    }
    auto t2 = std::chrono::high_resolution_clock::now();
    LOG(Helper::LogLevel::LL_Info, "Searching replicas ended. Search Time: %.2lf mins\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count()) / 60.0);

    if (p_opt.m_batches > 1) selections.LoadBatch(0, static_cast<size_t>(fullCount) * p_opt.m_replicaCount);

    // Sort results either in CPU or GPU
    VectorIndex::SortSelections(&selections.m_selections);

    auto t3 = std::chrono::high_resolution_clock::now();
    LOG(Helper::LogLevel::LL_Info, "Time to sort selections:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t3 - t2).count()) / 1000);

    // Cap each posting list to the number of records that fit in the page budget.
    if (p_opt.m_postingPageLimit > 0)
    {
        m_postingSizeLimit = static_cast<int>(p_opt.m_postingPageLimit * PageSize / vectorInfoSize);
    }

    LOG(Helper::LogLevel::LL_Info, "Posting size limit: %d\n", m_postingSizeLimit);

    auto postingSizeLimit = m_postingSizeLimit;

    // Report replica-count distribution before the posting cut.
    {
        std::vector<int> replicaCountDist(p_opt.m_replicaCount + 1, 0);
        for (int i = 0; i < replicaCount.size(); ++i)
        {
            ++replicaCountDist[replicaCount[i]];
        }

        LOG(Helper::LogLevel::LL_Info, "Before Posting Cut:\n");
        for (int i = 0; i < replicaCountDist.size(); ++i)
        {
            LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]);
        }
    }

    // Posting cut: drop overflow records past the limit, decrementing the
    // replica count of each dropped vector, then sort survivors by doc id.
    // Parallel-safe: each iteration owns a disjoint selections range, and
    // replicaCount entries are atomic.
#pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < postingListSize.size(); ++i)
    {
        std::size_t selectIdx = std::lower_bound(selections.m_selections.begin(), selections.m_selections.end(), i, Selection::g_edgeComparer) - selections.m_selections.begin();

        if (postingListSize[i] <= postingSizeLimit) {
            std::sort(selections.m_selections.begin() + selectIdx, selections.m_selections.begin() + selectIdx + postingListSize[i], sort_docid_cmp);
            continue;
        }

        for (size_t dropID = postingSizeLimit; dropID < postingListSize[i]; ++dropID)
        {
            int tonode = selections.m_selections[selectIdx + dropID].tonode;
            --replicaCount[tonode];
        }
        postingListSize[i] = postingSizeLimit;
        std::sort(selections.m_selections.begin() + selectIdx, selections.m_selections.begin() + selectIdx + postingListSize[i], sort_docid_cmp);
    }

    // Optionally dump IDs of vectors left with fewer than two replicas.
    if (p_opt.m_outputEmptyReplicaID)
    {
        std::vector<int> replicaCountDist(p_opt.m_replicaCount + 1, 0);
        auto ptr = SPTAG::f_createIO();
        if (ptr == nullptr || !ptr->Initialize("EmptyReplicaID.bin", std::ios::binary | std::ios::out))
        {
            LOG(Helper::LogLevel::LL_Error, "Fail to create EmptyReplicaID.bin!\n");
            return false;
        }
        for (int i = 0; i < replicaCount.size(); ++i)
        {
            ++replicaCountDist[replicaCount[i]];
            if (replicaCount[i] < 2)
            {
                long long vid = i;
                if (ptr->WriteBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) != sizeof(vid)) {
                    LOG(Helper::LogLevel::LL_Error, "Failt to write EmptyReplicaID.bin!");
                    return false;
                }
            }
        }
        LOG(Helper::LogLevel::LL_Info, "After Posting Cut:\n");
        for (int i = 0; i < replicaCountDist.size(); ++i)
        {
            LOG(Helper::LogLevel::LL_Info, "Replica Count Dist: %d, %d\n", i, replicaCountDist[i]);
        }
    }

    auto t4 = std::chrono::high_resolution_clock::now();
    LOG(SPTAG::Helper::LogLevel::LL_Info, "Time to perform posting cut:%.2lf sec.\n", ((double)std::chrono::duration_cast<std::chrono::seconds>(t4 - t3).count()) + ((double)std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count()) / 1000);

    if (p_opt.m_ssdIndexFileNum > 1) selections.SaveBatch();

    auto fullVectors = p_reader->GetVectorSet();
    if (p_opt.m_distCalcMethod == DistCalcMethod::Cosine && !p_reader->IsNormalized()) fullVectors->Normalize(p_opt.m_iSSDNumberOfThreads);

    // Serialize every posting list and store it under its head ID.
    for (int id = 0; id < postingListSize.size(); id++)
    {
        std::string postinglist;
        std::size_t selectIdx = selections.lower_bound(id);
        for (int j = 0; j < postingListSize[id]; ++j) {
            if (selections[selectIdx].node != id) {
                LOG(Helper::LogLevel::LL_Error, "Selection ID NOT MATCH\n");
                exit(1);
            }
            int fullID = selections[selectIdx++].tonode;
            size_t dim = fullVectors->Dimension();
            // First Vector ID, then Vector
            postinglist += Helper::Convert::Serialize<int>(&fullID, 1);
            postinglist += Helper::Convert::Serialize<ValueType>(fullVectors->GetVector(fullID), dim);
        }
        AddIndex(id, postinglist);
    }

    // Write SSDIndexInfo metadata: total doc count, posting count, then one
    // int32 size per posting.
    auto ptr = SPTAG::f_createIO();
    if (ptr == nullptr || !ptr->Initialize(p_opt.m_ssdInfoFile.c_str(), std::ios::binary | std::ios::out)) {
        LOG(Helper::LogLevel::LL_Error, "Failed open file %s\n", p_opt.m_ssdInfoFile.c_str());
        exit(1);
    }

    // Number of all documents.
    int i32Val = static_cast<int>(fullCount);
    if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
        LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndexInfo File!");
        exit(1);
    }

    // Number of postings
    i32Val = static_cast<int>(postingListSize.size());
    if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
        LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndexInfo File!");
        exit(1);
    }

    for(int id = 0; id < postingListSize.size(); id++)
    {
        i32Val = postingListSize[id].load();
        if (ptr->WriteBinary(sizeof(i32Val), reinterpret_cast<char*>(&i32Val)) != sizeof(i32Val)) {
            LOG(Helper::LogLevel::LL_Error, "Failed to write SSDIndexInfo File!");
            exit(1);
        }
    }

    // Initialize and persist an (empty) deletion bitmap for SPFresh updates.
    LOG(Helper::LogLevel::LL_Info, "SPFresh: initialize deleteMap\n");
    COMMON::Labelset m_deleteID;
    m_deleteID.Initialize(fullCount, p_headIndex->m_iDataBlockSize, p_headIndex->m_iDataCapacity);
    LOG(Helper::LogLevel::LL_Info, "SPFresh: save deleteMap\n");
    m_deleteID.Save(p_opt.m_fullDeletedIDFile);

    auto t5 = std::chrono::high_resolution_clock::now();
    double elapsedSeconds = std::chrono::duration_cast<std::chrono::seconds>(t5 - t1).count();
    LOG(Helper::LogLevel::LL_Info, "Total used time: %.2lf minutes (about %.2lf hours).\n", elapsedSeconds / 60.0, elapsedSeconds / 3600.0);

    return true;
}

// Appends serialized records to an existing posting via the store's merge
// operator. An empty payload is logged but still forwarded — presumably so
// the store's own error surfaces; TODO confirm this is intentional.
ErrorCode AppendPosting(SizeType headID, const std::string& appendPosting) override {
    if (appendPosting.empty()) {
        LOG(Helper::LogLevel::LL_Error, "Error! empty append posting!\n");
    }
    return db.Merge(headID, appendPosting);
}

// Triggers a manual compaction of the underlying key-value store.
void ForceCompaction() override { db.ForceCompaction(); }

// Fetches the raw posting blob for headID into `posting`.
inline ErrorCode SearchIndex(SizeType headID, std::string& posting) override { return db.Get(headID, &posting); }

// Stores a new posting and bumps the posting counter.
inline ErrorCode AddIndex(SizeType headID, const std::string& posting) override { m_postingNum++; return db.Put(headID, posting); }

// Removes a posting and decrements the posting counter.
inline ErrorCode DeleteIndex(SizeType headID) override { m_postingNum--; return db.Delete(headID); }

// Replaces a posting in place; the posting counter is unchanged.
inline ErrorCode OverrideIndex(SizeType headID, const std::string& posting) override { return db.Put(headID, posting); }

// Current number of postings held in the store.
inline SizeType GetIndexSize() override { return m_postingNum; }

// Maximum number of records allowed per posting after the posting cut.
inline SizeType GetPostingSizeLimit() override { return m_postingSizeLimit; }

private:
    // Per-posting metadata parsed from the legacy on-disk head info file.
    struct ListInfo
    {
        int listEleCount = 0;          // number of records in the posting
        std::uint16_t listPageCount = 0; // pages spanned by the posting
        std::uint64_t listOffset = 0;    // byte offset of the posting's first page
        std::uint16_t pageOffset = 0;    // in-page byte offset of the first record
    };

    // Parses the head-info header of a legacy SSD index file: global counts,
    // then per-posting (pageNum, pageOffset, eleCount, pageCount) records.
    // Element counts are clamped to p_postingPageLimit pages. Returns the
    // posting count; exits the process on any read error (file is unusable).
    int LoadingHeadInfo(const std::string& p_file, int p_postingPageLimit, std::vector<ListInfo>& m_listInfos)
    {
        auto ptr = SPTAG::f_createIO();
        if (ptr == nullptr || !ptr->Initialize(p_file.c_str(), std::ios::binary | std::ios::in)) {
            LOG(Helper::LogLevel::LL_Error, "Failed to open file: %s\n", p_file.c_str());
            exit(1);
        }

        int m_listCount;
        int m_totalDocumentCount;
        int m_iDataDimension;
        int m_listPageOffset;

        if (ptr->ReadBinary(sizeof(m_listCount), reinterpret_cast<char*>(&m_listCount)) != sizeof(m_listCount)) {
            LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
            exit(1);
        }
        if (ptr->ReadBinary(sizeof(m_totalDocumentCount), reinterpret_cast<char*>(&m_totalDocumentCount)) != sizeof(m_totalDocumentCount)) {
            LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
            exit(1);
        }
        if (ptr->ReadBinary(sizeof(m_iDataDimension), reinterpret_cast<char*>(&m_iDataDimension)) != sizeof(m_iDataDimension)) {
            LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
            exit(1);
        }
        if (ptr->ReadBinary(sizeof(m_listPageOffset), reinterpret_cast<char*>(&m_listPageOffset)) != sizeof(m_listPageOffset)) {
            LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
            exit(1);
        }

        // Derive (or cross-check) the record size from the stored dimension.
        if (m_vectorInfoSize == 0) m_vectorInfoSize = m_iDataDimension * sizeof(ValueType) + sizeof(int);
        else if (m_vectorInfoSize != m_iDataDimension * sizeof(ValueType) + sizeof(int)) {
            LOG(Helper::LogLevel::LL_Error, "Failed to read head info file! DataDimension and ValueType are not match!\n");
            exit(1);
        }

        m_listInfos.resize(m_listCount);

        size_t totalListElementCount = 0;

        std::map<int, int> pageCountDist;

        size_t biglistCount = 0;
        size_t biglistElementCount = 0;
        int pageNum;
        for (int i = 0; i < m_listCount; ++i)
        {
            if (ptr->ReadBinary(sizeof(pageNum), reinterpret_cast<char*>(&(pageNum))) != sizeof(pageNum)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }
            if (ptr->ReadBinary(sizeof(m_listInfos[i].pageOffset), reinterpret_cast<char*>(&(m_listInfos[i].pageOffset))) != sizeof(m_listInfos[i].pageOffset)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }
            if (ptr->ReadBinary(sizeof(m_listInfos[i].listEleCount), reinterpret_cast<char*>(&(m_listInfos[i].listEleCount))) != sizeof(m_listInfos[i].listEleCount)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }
            if (ptr->ReadBinary(sizeof(m_listInfos[i].listPageCount), reinterpret_cast<char*>(&(m_listInfos[i].listPageCount))) != sizeof(m_listInfos[i].listPageCount)) {
                LOG(Helper::LogLevel::LL_Error, "Failed to read head info file!\n");
                exit(1);
            }
            // Absolute byte offset of the posting's first page.
            m_listInfos[i].listOffset = (static_cast<uint64_t>(m_listPageOffset + pageNum) << PageSizeEx);
            // Clamp element count to what fits inside the page-limited span,
            // then recompute the page count for the clamped size.
            m_listInfos[i].listEleCount = min(m_listInfos[i].listEleCount, (min(static_cast<int>(m_listInfos[i].listPageCount), p_postingPageLimit) << PageSizeEx) / m_vectorInfoSize);
            m_listInfos[i].listPageCount = static_cast<std::uint16_t>(ceil((m_vectorInfoSize * m_listInfos[i].listEleCount + m_listInfos[i].pageOffset) * 1.0 / (1 << PageSizeEx)));
            totalListElementCount += m_listInfos[i].listEleCount;
            int pageCount = m_listInfos[i].listPageCount;
            if (pageCount > 1)
            {
                ++biglistCount;
                biglistElementCount += m_listInfos[i].listEleCount;
            }
            if (pageCountDist.count(pageCount) == 0)
            {
                pageCountDist[pageCount] = 1;
            }
            else
            {
                pageCountDist[pageCount] += 1;
            }
        }

        LOG(Helper::LogLevel::LL_Info, "Finish reading header info, list count %d, total doc count %d, dimension %d, list page offset %d.\n", m_listCount, m_totalDocumentCount, m_iDataDimension, m_listPageOffset);
        LOG(Helper::LogLevel::LL_Info, "Big page (>4K): list count %zu, total element count %zu.\n", biglistCount, biglistElementCount);
        LOG(Helper::LogLevel::LL_Info, "Total Element Count: %llu\n", totalListElementCount);

        for (auto& ele : pageCountDist)
        {
            LOG(Helper::LogLevel::LL_Info, "Page Count Dist: %d %d\n", ele.first, ele.second);
        }

        return m_listCount;
    }

private:
    std::string m_extraFullGraphFile;   // base path of the legacy index files (unused while LoadIndex is disabled)

    // std::vector<std::vector<ListInfo>> m_listInfos;

    std::vector<std::shared_ptr<Helper::DiskPriorityIO>> m_indexFiles; // legacy async file handles (unused while LoadIndex is disabled)

    int m_vectorInfoSize = 0;           // bytes per record: sizeof(int) id + raw vector

    // int m_totalListCount = 0;

    // int m_listPerFile = 0;

    int m_postingSizeLimit = INT_MAX;   // max records per posting after posting cut
};
} // namespace SPTAG

#endif // _SPTAG_SPANN_EXTRADBSEARCHER_H_
Network.h
/*
 *  Network.h
 *
 *  Created by Guido Novati on 30.10.18.
 *  Copyright 2018 ETH Zurich. All rights reserved.
 *
 */
#pragma once
#include "Layers.h"

// A simple feed-forward neural network: an ordered list of layers with their
// parameters, parameter gradients, and a per-batch activation workspace.
// Row-major layout is used throughout: each workspace matrix is
// [batchSize] x [layer size], one row per mini-batch sample.
struct Network
{
  std::mt19937 gen; // RNG used for parameter initialization

  // Vector of layers, each defines a forward and bckward operation:
  std::vector<Layer*> layers;
  // Vector of parameters of each layer (two vectors must have the same size)
  // Each Params contains the matrices of parameters needed by the corresp layer
  std::vector<Params*> params;
  // Vector of grads for each parameter. By definition they have the same size
  std::vector<Params*> grads;
  // Memory space where each layer can compute its output and gradient:
  std::vector<Activation*> workspace;
  // Number of inputs to the network:
  int nInputs = 0;
  // Number of network outputs:
  int nOutputs = 0;
  // Batch size the current workspace was allocated for (0 = not allocated):
  size_t alloc_batchSize = 0;

  Network(const int seed = 0) : gen(seed) {};

  // Forward propagation over a mini-batch.
  //   O          - output: one vector of size nOutputs per batch element.
  //   I          - one vector of input for each element in the mini-batch.
  //   layerStart - layer ID at which to start the forward operation
  //                (zero means compute from input to output).
  // User can overwrite the output of any upper layer (marked by layerStart)
  // in order to see what happens if that layer has a predefined output
  // (this allows visualizing PCA components!).
  void forward(std::vector<std::vector<Real>>& O,
               const std::vector<std::vector<Real>> I,
               const size_t layerStart = 0)
  {
    if(params.size()==0 || grads.size()==0 || layers.size()==0) {
      printf("Attempted to access uninitialized network. Aborting\n");
      abort();
    }
    // input is a minibatch of datapoints: one vector for each datapoint:
    const size_t batchSize = I.size();

    // (Re)allocate workspaces only when the batch size changed:
    if (batchSize not_eq alloc_batchSize) {
      clearWorkspace();
      alloc_batchSize = batchSize;
      workspace = allocateActivation(batchSize);
    }

    const int inputLayerSize = workspace[layerStart]->layersSize;
    // Copy input onto the output of the starting layer. Row-major:
    // inputLayerSize is the number of columns, one row per batch element.
    #pragma omp parallel for schedule(static)
    for (size_t b=0; b<batchSize; b++) {
      assert(I[b].size() == (size_t) inputLayerSize );
      Real* const input_b = workspace[layerStart]->output + b * inputLayerSize;
      std::copy(I[b].begin(), I[b].end(), input_b);
    }

    // Start from the layer after layerStart. E.g. input layer is 0: no need
    // to run it, as it has no parameters (it only holds the copied input).
    for (size_t j=layerStart+1; j<layers.size(); j++)
      layers[j]->forward(workspace, params);

    // Network output is the output of the last layer; copy it row by row
    // from the workspace into the caller's O.
    #pragma omp parallel for schedule(static)
    for (size_t b=0; b<batchSize; b++) {
      assert(nOutputs == workspace.back()->layersSize);
      Real* const output_b = workspace.back()->output + b * nOutputs;
      std::copy(output_b, output_b + nOutputs, O[b].begin());
    }
  }

  // Backward propagation over a mini-batch. Assumes forward() was already
  // called with the same batch size (the workspace must be populated).
  //   E          - gradient of the error wrt the network output, one vector
  //                per batch element.
  //   layerStart - layer ID at which the forward operation was started.
  void bckward(const std::vector<std::vector<Real>> E,
               const size_t layerStart=0) const
  {
    //this function assumes that we already called forward propagation
    if(params.size()==0 || grads.size()==0 || layers.size()==0) {
      printf("Attempted to access uninitialized network. Aborting\n");
      abort();
    }
    // input is a minibatch of datapoints: one vector for each datapoint:
    const size_t batchSize = E.size();
    assert( (size_t) workspace.back()->batchSize == batchSize);

    // Write d Err / d Out onto the last layer of the network. Row-major:
    // nOutputs is the number of columns, one row per batch element.
    #pragma omp parallel for schedule(static)
    for (size_t b=0; b<batchSize; b++) {
      assert(E[b].size() == (size_t) nOutputs);
      Real* const errors_b = workspace.back()->dError_dOutput + b * nOutputs;
      std::copy(E[b].begin(), E[b].end(), errors_b);
    }

    // Backprop starts at the last layer, which computes the gradient of the
    // error wrt its parameters and wrt its input. The last layer to backprop
    // is the one above the input layer: e.g. if layerStart=0 the input layer
    // is 0, which has no parameters and no inputs to propagate the gradient
    // to, so the last layer processed is layer 1.
    for (size_t i = layers.size()-1; i >= layerStart + 1; i--)
      layers[i]->bckward(workspace, params, grads);
  }

  // Helper function for forward with batchsize = 1
  std::vector<Real> forward(const std::vector<Real>I, const size_t layerStart=0)
  {
    std::vector<std::vector<Real>> vecO (1, std::vector<Real>(nOutputs));
    const std::vector<std::vector<Real>> vecI (1, I);
    forward(vecO, vecI, layerStart);
    return vecO[0];
  }

  // Helper function for bckward with batchsize = 1
  void bckward(const std::vector<Real> E, const size_t layerStart = 0) const
  {
    std::vector<std::vector<Real>> vecE (1, E);
    bckward(vecE, layerStart);
  }

  // Ask each layer to persist its parameters (delegated to the layers).
  void save() const {
    for(const auto &l : layers) l->save(params);
  }

  // Ask each layer to reload its parameters (delegated to the layers).
  void restart() const {
    for(const auto &l : layers) l->restart(params);
  }

  // Releases all owned layers, parameters, gradients, and workspaces.
  ~Network() {
    for(auto& p : grads) _dispose_object(p);
    for(auto& p : params) _dispose_object(p);
    for(auto& p : layers) _dispose_object(p);
    for(auto& p : workspace) _dispose_object(p);
  }

  // Frees the activation workspace (called when the batch size changes).
  inline void clearWorkspace() {
    for(auto& p : workspace) _dispose_object(p);
    workspace.clear();
  }

  // Function to loop over layers and allocate workspace for network operations:
  inline std::vector<Activation*> allocateActivation(size_t batchSize) const
  {
    std::vector<Activation*> ret(layers.size(), nullptr);
    for(size_t j=0; j<layers.size(); j++)
      ret[j] = layers[j]->allocateActivation(batchSize);
    return ret;
  }

  // Function to loop over layers and allocate memory space for parameter grads:
  inline std::vector<Params*> allocateGrad() const
  {
    std::vector<Params*> ret(layers.size(), nullptr);
    for(size_t j=0; j<layers.size(); j++)
      ret[j] = layers[j]->allocate_params();
    return ret;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// Functions to build the network are defined in Network_buildFunctions.h ///
  //////////////////////////////////////////////////////////////////////////////
  template<int size>
  void addInput();
  template<int nInputs, int size>
  void addLinear(const std::string fname = std::string());
  template<int size>
  void addTanh();
};

#include "Network_buildFunctions.h"
atomic_messages.c
// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -DOMP51 -verify=expected,omp50,omp51 -fopenmp -fopenmp-version=51 -ferror-limit 100 %s -Wuninitialized

// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp-simd -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -DOMP51 -verify=expected,omp50,omp51 -fopenmp-simd -ferror-limit 100 %s -Wuninitialized

// Clang -verify test for '#pragma omp atomic' diagnostics. All expected-error /
// expected-note markers use line-relative anchors (@+N), so only comments
// placed BETWEEN functions are safe to add; the marker/statement line
// structure inside each function is load-bearing and must not change.

// Uninitialized-use warning still fires through an atomic read.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp atomic read
  argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// Compound statements (and jumps in/out of them) are not valid atomic bodies.
int foo(void) {
L1:
  foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
    goto L1;
  }
  goto L2;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
  L2:
    foo();
  }
  return 0;
}

// Non-scalar type used to exercise the "expected expression of scalar type" notes.
struct S {
  int a;
};

int readint(void) {
  int a = 0, b = 0;
// Test for atomic read
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected lvalue expression}}
  a = 0;
#pragma omp atomic read
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
  a = b;

  return 0;
}

int readS(void) {
  struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}}
#pragma omp atomic read read allocate(a)
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}

int writeint(void) {
  int a = 0, b = 0;
// Test for atomic write
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic write
  a = 0;
#pragma omp atomic write
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  a = b;

  return 0;
}

int writeS(void) {
  struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected expression of scalar type}}
  a = b;

  return a.a;
}

int updateint(void) {
  int a = 0, b = 0;
// Test for atomic update
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in binary or unary operator}}
  foo();
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in binary operator}}
  a = b;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = b || a;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = a && b;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = (float)a + b;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = 2 * b;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = b + *&a;
#pragma omp atomic update
  *&a = *&a + 2;
#pragma omp atomic update
  a++;
#pragma omp atomic
  ++a;
#pragma omp atomic update
  a--;
#pragma omp atomic
  --a;
#pragma omp atomic update
  a += b;
#pragma omp atomic
  a %= b;
#pragma omp atomic update
  a *= b;
#pragma omp atomic
  a -= b;
#pragma omp atomic update
  a /= b;
#pragma omp atomic
  a &= b;
#pragma omp atomic update
  a ^= b;
#pragma omp atomic
  a |= b;
#pragma omp atomic update
  a <<= b;
#pragma omp atomic
  a >>= b;
#pragma omp atomic update
  a = b + a;
#pragma omp atomic
  a = a * b;
#pragma omp atomic update
  a = b - a;
#pragma omp atomic
  a = a / b;
#pragma omp atomic update
  a = b & a;
#pragma omp atomic
  a = a ^ b;
#pragma omp atomic update
  a = b | a;
#pragma omp atomic
  a = a << b;
#pragma omp atomic
  a = b >> a;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}}
#pragma omp atomic update update
  a /= b;

  return 0;
}

int captureint(void) {
  int a = 0, b = 0, c = 0;
// Test for atomic capture
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected compound statement}}
  ;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  foo();
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in binary or unary operator}}
  a = b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = b || a;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  b = a = a && b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = (float)a + b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = 2 * b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = b + *&a;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected exactly two expression statements}}
  { a = b; }
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected exactly two expression statements}}
  {}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b;a = b;}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b; a = b || a;}
#pragma omp atomic capture
  {b = a; a = a && b;}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = (float)a + b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = 2 * b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = b + *&a;
#pragma omp atomic capture
  c = *&a = *&a + 2;
#pragma omp atomic capture
  c = a++;
#pragma omp atomic capture
  c = ++a;
#pragma omp atomic capture
  c = a--;
#pragma omp atomic capture
  c = --a;
#pragma omp atomic capture
  c = a += b;
#pragma omp atomic capture
  c = a %= b;
#pragma omp atomic capture
  c = a *= b;
#pragma omp atomic capture
  c = a -= b;
#pragma omp atomic capture
  c = a /= b;
#pragma omp atomic capture
  c = a &= b;
#pragma omp atomic capture
  c = a ^= b;
#pragma omp atomic capture
  c = a |= b;
#pragma omp atomic capture
  c = a <<= b;
#pragma omp atomic capture
  c = a
>>= b; #pragma omp atomic capture c = a = b + a; #pragma omp atomic capture c = a = a * b; #pragma omp atomic capture c = a = b - a; #pragma omp atomic capture c = a = a / b; #pragma omp atomic capture c = a = b & a; #pragma omp atomic capture c = a = a ^ b; #pragma omp atomic capture c = a = b | a; #pragma omp atomic capture c = a = a << b; #pragma omp atomic capture c = a = b >> a; #pragma omp atomic capture { c = *&a; *&a = *&a + 2;} #pragma omp atomic capture { *&a = *&a + 2; c = *&a;} #pragma omp atomic capture {c = a; a++;} #pragma omp atomic capture {c = a; (a)++;} #pragma omp atomic capture {++a;c = a;} #pragma omp atomic capture {c = a;a--;} #pragma omp atomic capture {--a;c = a;} #pragma omp atomic capture {c = a; a += b;} #pragma omp atomic capture {c = a; (a) += b;} #pragma omp atomic capture {a %= b; c = a;} #pragma omp atomic capture {c = a; a *= b;} #pragma omp atomic capture {a -= b;c = a;} #pragma omp atomic capture {c = a; a /= b;} #pragma omp atomic capture {a &= b; c = a;} #pragma omp atomic capture {c = a; a ^= b;} #pragma omp atomic capture {a |= b; c = a;} #pragma omp atomic capture {c = a; a <<= b;} #pragma omp atomic capture {a >>= b; c = a;} #pragma omp atomic capture {c = a; a = b + a;} #pragma omp atomic capture {a = a * b; c = a;} #pragma omp atomic capture {c = a; a = b - a;} #pragma omp atomic capture {a = a / b; c = a;} #pragma omp atomic capture {c = a; a = b & a;} #pragma omp atomic capture {a = a ^ b; c = a;} #pragma omp atomic capture {c = a; a = b | a;} #pragma omp atomic capture {a = a << b; c = a;} #pragma omp atomic capture {c = a; a = b >> a;} #pragma omp atomic capture {c = a; a = foo();} // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}} #pragma omp atomic capture capture b = a /= b; return 0; } void hint(void) { int a = 0; #pragma omp atomic hint // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected '(' after 
'hint'}} a += 1; #pragma omp atomic hint( // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} a += 1; #pragma omp atomic hint(+ // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} a += 1; #pragma omp atomic hint(a // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{integer constant expression}} a += 1; #pragma omp atomic hint(a) // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} omp50-error {{integer constant expression}} a += 1; #pragma omp atomic hint(1) hint(1) // omp45-error 2 {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{directive '#pragma omp atomic' cannot contain more than one 'hint' clause}} a += 1; } #ifdef OMP51 extern void bbar(void); extern int ffoo(void); /* compare: clang -verify checks for the OpenMP 5.1 'atomic compare' construct (guarded by OMP51); every omp51-error/omp51-note comment is a harness directive whose '@+N' offset is line-relative, so statement layout must not change. */ void compare(void) { int x = 0; int d = 0; int e = 0; // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected compound statement}} #pragma omp atomic compare {} // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? 
d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected exactly one expression statement}} #pragma omp atomic compare { x = d; x = e; } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare { x += d; } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare { bbar(); } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? 
d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected conditional operator}} #pragma omp atomic compare { x = d; } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect binary operator in conditional expression}} #pragma omp atomic compare { x = ffoo() ? e : x; } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect '<', '>' or '==' as order operator}} #pragma omp atomic compare { x = x >= e ? e : x; } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? 
d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}} #pragma omp atomic compare { x = d > e ? e : x; } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect result value to be at false expression}} #pragma omp atomic compare { x = d > x ? e : d; } // omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+3 {{expect binary operator in conditional expression}} #pragma omp atomic compare { if (foo()) x = d; } // omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? 
d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+3 {{expect '<', '>' or '==' as order operator}} #pragma omp atomic compare { if (x >= d) x = d; } // omp51-error@+4 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+3 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}} #pragma omp atomic compare { if (e > d) x = d; } // omp51-error@+3 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected exactly one expression statement}} #pragma omp atomic compare { if (x > d) x = e; d = e; } // omp51-error@+7 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? 
d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+6 {{unexpected 'else' statement}} #pragma omp atomic compare { if (x > e) x = e; else d = e; } float fx = 0.0f; float fd = 0.0f; float fe = 0.0f; // omp51-error@+5 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+4 {{expect integer value}} #pragma omp atomic compare { if (fx > fe) fx = fe; } // omp51-error@+5 {{the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}', '{x = x == e ? d : x;}', '{x = e == x ? 
d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+4 {{expect integer value}} #pragma omp atomic compare { if (fx == fe) fx = fe; } } /* compare_capture: clang -verify checks for malformed OpenMP 5.1 'atomic compare capture' statements; the function body continues past this point. */ void compare_capture(void) { int x = 0; int d = 0; int e = 0; int v = 0; int r = 0; // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected compound statement}} #pragma omp atomic compare capture if (x == e) {} // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected exactly one expression statement}} #pragma omp atomic compare capture if (x == e) { x = d; v = x; } // omp51-error@+4 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = 
x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+3 {{expected assignment statement}} #pragma omp atomic compare capture if (x == e) { bbar(); } // omp51-error@+4 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+3 {{expected assignment statement}} #pragma omp atomic compare capture if (x == e) { x += d; } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect binary operator in conditional expression}} #pragma omp atomic compare capture if (ffoo()) { x = d; } // omp51-error@+3 {{the 
statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect '==' operator}} #pragma omp atomic compare capture if (x > e) { x = d; } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}} #pragma omp atomic compare capture if (d == e) { x = d; } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or 
'>'.}} // omp51-note@+2 {{expect 'else' statement}} #pragma omp atomic compare capture if (x == e) { x = d; } // omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+4 {{expected compound statement}} #pragma omp atomic compare capture if (x == e) { x = d; } else { } // omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+4 {{expected exactly one expression statement}} #pragma omp atomic compare capture if (x == e) { x = d; } else { v = x; d = e; } // omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 
'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+5 {{expected assignment statement}} #pragma omp atomic compare capture if (x == e) { x = d; } else { bbar(); } // omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+5 {{expected assignment statement}} #pragma omp atomic compare capture if (x == e) { x = d; } else { v += x; } // omp51-error@+6 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+5 {{expect an assignment statement 'v = x'}} #pragma omp atomic compare capture if (x == e) { x = d; } else { v = d; } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can 
have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected compound statement}} #pragma omp atomic compare capture {} // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect a compound statement}} #pragma omp atomic compare capture x = x > e ? e : x; // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect a 'if' statement}} #pragma omp atomic compare capture { x = x > e ? 
e : x; } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect a form 'r = x == e; if (r) ...'}} #pragma omp atomic compare capture { r = x == e; if (x == d) { x = e; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare capture { r = x == e; if (r) { bbar(); } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 
'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare capture { r = x == e; if (r) { x += d; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected compound statement}} #pragma omp atomic compare capture { r = x == e; if (r) {} } // omp51-error@+5 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+4 {{expected exactly one expression statement}} #pragma omp atomic compare capture { r = x == e; if (r) { x = d; v = x; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', 
or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect '==' operator}} #pragma omp atomic compare capture { r = x > e; if (r) { x = d; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'}} #pragma omp atomic compare capture { r = d == e; if (r) { x = d; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected compound statement}} #pragma omp atomic compare capture { r = x == e; if (r) { x = d; } else {} } // omp51-error@+7 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) 
{x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+6 {{expected exactly one expression statement}} #pragma omp atomic compare capture { r = x == e; if (r) { x = d; } else { v = x; d = e; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare capture { r = x == e; if (r) { x = d; } else { bbar(); } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare capture { r = x == e; if (r) { x = d; } else { v += x; } } // omp51-error@+3 {{the statement for 'atomic 
compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect an assignment statement 'v = x'}} #pragma omp atomic compare capture { r = x == e; if (r) { x = d; } else { v = d; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare capture { v += x; if (x == e) { x = d; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // 
omp51-note@+2 {{expected assignment statement}} #pragma omp atomic compare capture { if (x == e) { x = d; } v += x; } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect an assignment statement 'v = x'}} #pragma omp atomic compare capture { v = d; if (x == e) { x = d; } } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect an assignment statement 'v = x'}} #pragma omp atomic compare capture { if (x == e) { x = d; } v = d; } // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' 
where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect a 'if' statement}} #pragma omp atomic compare capture { v = x; bbar(); } float fv; // omp51-error@+3 {{the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}', '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type, and 'ordop' is one of '<' or '>'.}} // omp51-note@+2 {{expect integer value}} #pragma omp atomic compare capture { fv = x; if (x == e) { x = d; } } } #endif
pop.h
// header file for ind struct #pragma once #ifndef POP_H #define POP_H //#include "params.h" //#include "data.h" //#include "RPN_class.h" #include "op_node.h" #include "rnd.h" #include "strdist.h" //#include <Eigen/Dense> using Eigen::MatrixXf; using Eigen::VectorXf; #include <boost/uuid/uuid.hpp> #include <boost/uuid/uuid_generators.hpp> //#include "general_fns.h" //#include "pareto.h" struct ind { /* =================================================================== BE SURE TO ADD ANY NEW VARIABLES TO THE SWAP FUNCTION FOR COPYING!! =================================================================== */ //unsigned int id; /*vector <std::shared_ptr<node> > line;*/ vector <node> line; vector<float> output; vector<float> output_v; vector<float> error; // fitnesses for lexicase selection vector<float> f; // vector of objectives for multi-objective implementations (PS_sel) std::vector<unsigned int> stack_floatlen; std::vector<float> stack_float; // linearized stack_float std::vector<int> dominated; //for spea2 strength std::vector<MatrixXf> C; // covariance matrices for M3GP std::string eqn; std::string eqn_form; // equation form for string distance comparison to other forms std::string eqn_matlab; // equation for matlab (elementwise and protected operators) MatrixXf M; // centroids for M3GP float abserror; float abserror_v; float sq_error; float sq_error_v; float corr; float corr_v; float VAF; float VAF_v; float fitness; float fitness_v; float FEvar; //variance in fitness estimates (for sorting purposes) float GEvar; //variance in generality estimates (for sorting purposes) float genty; //generality float spea_fit; float parentfitness; int eff_size; int age; int rank; int complexity; int dim; char origin; // x: crossover, m: mutation, i: initialization boost::uuids::uuid tag; // uuid for graph database tracking vector<boost::uuids::uuid> parent_id; /* =================================================================== BE SURE TO ADD ANY NEW VARIABLES TO THE SWAP 
FUNCTION FOR COPYING!! =================================================================== */ ind() :tag(boost::uuids::random_generator()()) { abserror = 0; sq_error = 0; corr = 0; age = 1; genty = 1; } /*ind(const ind& x) { *this = x; }*/ ~ind() { //cout << "ind destructor\n"; //if(!line.empty()) //{ // for(vector<node*>::iterator it= line.begin();it!=line.end();it++) // delete(*it); // line.clear(); // //cout << "ind destructor deleted line nodes\n"; //} // } ind & operator = (ind s) // over-ride copy construction with swap { s.swap (*this); // Non-throwing swap return *this; } void swap (ind &s) { line.swap(s.line); // vectors output.swap(s.output); output_v.swap(s.output_v); error.swap(s.error); f.swap(s.f); stack_floatlen.swap(s.stack_floatlen); stack_float.swap(s.stack_float); dominated.swap(dominated); C.swap(s.C); parent_id.swap(s.parent_id); eqn.swap(s.eqn); // strings eqn_form.swap(s.eqn_form); eqn_matlab.swap(s.eqn_matlab); using std::swap; swap(this->M,s.M); swap(this->abserror,s.abserror); // floats swap(this->abserror_v,s.abserror_v); swap(this->sq_error, s.sq_error); // floats swap(this->sq_error_v, s.sq_error_v); swap(this->corr,s.corr); swap(this->corr_v,s.corr_v); swap(this->VAF,s.VAF); swap(this->VAF_v,s.VAF_v); swap(this->fitness,s.fitness); swap(this->fitness_v,s.fitness_v); swap(this->FEvar,s.FEvar); swap(this->GEvar,s.GEvar); swap(this->genty,s.genty); swap(this->spea_fit,s.spea_fit); swap(this->parentfitness,s.parentfitness); swap(this->eff_size,s.eff_size); // ints swap(this->age,s.age); swap(this->rank,s.rank); swap(this->complexity,s.complexity); swap(this->dim,s.dim); swap(this->origin,s.origin); // chars swap(this->tag,s.tag); //uuid identifier }//throw (); // Also see the non-throwing swap idiom ////swap optimization //void swap(ind&) throw(); //void init(string& nom_mod) //{ // eqn = nom_mod; // ptr.push_back(1); // ptr.push_back(nom_mod.size()-2); // //nominal_model=nom_mod; // //expression.register_symbol_table(d.symbol_table); //} 
void reset_introns() { for (size_t i = 0; i < line.size(); ++i) // set all introns to true line[i].intron = true; } void clrPhen() { abserror = 0; abserror_v=0; sq_error = 0; sq_error_v = 0; corr = 0; corr_v=0; fitness=0; fitness_v=0; VAF = 0; VAF_v = 0; eqn = ""; eqn_form=""; output.clear(); output_v.clear(); error.clear(); C.clear(); M.resize(0,0); genty = 1; //stack_float.clear(); // nominal model must be encased in set of parenthesis. the pointer points to that which is encased. //ptr[0]= 1; //ptr[1] = nom_mod.size()-2; } //private: // string& nominal_model; }; struct sub_ind { float fitness; float abserror; float sq_error; float corr; float VAF; float abserror_v; float sq_error_v; float corr_v; float VAF_v; string eqn; string eqn_matlab; int age; int complexity; int dim; sub_ind(){} void init(ind& x){ fitness = x.fitness; abserror = x.abserror; abserror_v = x.abserror_v; sq_error = x.sq_error; sq_error_v = x.sq_error_v; corr = x.corr; corr_v = x.corr_v; VAF = x.VAF; VAF_v = x.VAF_v; eqn = x.eqn; eqn_matlab = x.eqn_matlab; age=x.age; complexity = x.complexity; dim = x.dim; } ~sub_ind(){} }; //swap optimization inline void swap(ind& lhs, ind& rhs) { lhs.swap(rhs); } namespace std { template<> inline void swap<struct ind>(ind& lhs, ind& rhs) {lhs.swap(rhs); }} ////using std::swap; struct SortFit{ bool operator() (const ind& i,const ind& j) { return (i.fitness<j.fitness);} }; struct SortFit2{ bool operator() (const sub_ind& i,const sub_ind& j) { return (i.fitness<j.fitness);} }; struct SortRank{ bool operator() (const ind& i,const ind& j) { return (i.rank<j.rank);} }; struct SortGenty{ bool operator() (const ind& i,const ind& j) { return (i.genty<j.genty);} }; struct revSortRank{ bool operator() (ind& i,ind& j) { return (i.rank>j.rank);} }; struct SortEqnSize{ bool operator() (const ind& i,const ind& j) { return (i.eqn.size()<j.eqn.size());} }; struct SortFEVar{ bool operator() (const ind& i,const ind& j) { return (i.FEvar>j.FEvar);} }; struct SortGEVar{ bool 
operator() (const ind& i,const ind& j) { return (i.GEvar>j.GEvar);} }; struct SortComplexity{bool operator() (const ind& i,const ind& j) { return (i.complexity<j.complexity);}}; struct SortFit_v{ bool operator() (const ind& i,const ind& j) { return (i.fitness_v<j.fitness_v);}}; struct SortSize{ bool operator() (const ind& i,const ind& j) { return (i.line.size()<j.line.size());}}; struct SortAge{ bool operator() (const ind& i,const ind& j) { return (i.age < j.age );}}; struct sameEqn{ bool operator() (ind& i,ind& j) { return i.eqn==j.eqn;} }; struct sameEqnSize{ bool operator() (ind& i,ind& j) { return i.eqn.size()==j.eqn.size();} }; struct sameSizeFit{ bool operator() (ind& i,ind& j) { return (i.fitness==j.fitness && i.eqn.size()==j.eqn.size());} }; struct sameFit{ bool operator() (ind& i,ind& j) { return (i.fitness==j.fitness);} }; struct sameFit2{ bool operator() (sub_ind& i,sub_ind& j) { return (i.fitness==j.fitness);} }; struct sameOutput{ bool operator() (ind& i, ind& j) { if (i.output.size()==j.output.size()) return std::equal(i.output.begin(),i.output.end(),j.output.begin()); else return 0; } }; struct sameComplexity{bool operator() (const ind& i,const ind& j) { return (i.complexity==j.complexity);} }; struct sameFitComplexity{bool operator() (const ind& i,const ind& j) { return (i.fitness==j.fitness && i.complexity==j.complexity);} }; struct tribe{ vector <ind> pop; // population float best; float worst; tribe(int size,float& max_fit,float& min_fit) { pop.resize(size); best=max_fit; worst=min_fit; /*for(unsigned int i = 1;i<pop.size();++i) pop.at(i).init(nom_mod);*/ maxf=max_fit; minf=min_fit; } ~tribe() {} float bestFit() // returns best fitness value { /*#pragma omp parallel { float localbest = maxf; #pragma omp for schedule(static) for(int i = 0; i < pop.size(); ++i) localbest = min(localbest, pop.at(i).fitness); #pragma omp critical { best = min(localbest, best); } }*/ best = maxf; for(int i = 0; i < pop.size(); ++i) best = min(best, pop.at(i).fitness); 
return best; } float bestFit_v() // returns best fitness value { /*#pragma omp parallel { float localbest = maxf; #pragma omp for schedule(static) for(int i = 0; i < pop.size(); ++i) localbest = min(localbest, pop.at(i).fitness); #pragma omp critical { best = min(localbest, best); } }*/ best = maxf; for(int i = 0; i < pop.size(); ++i) best = min(best, pop.at(i).fitness_v); return best; } float worstFit() //worst fitness { worst = minf; /*#pragma omp parallel { float localworst = minf; #pragma omp for schedule(static) for(int i = 0; i < pop.size(); ++i) localworst = max(localworst, pop.at(i).fitness); #pragma omp critical { worst = max(localworst, worst); } }*/ for(int i = 0; i < pop.size(); ++i) worst = max(worst, pop.at(i).fitness); return worst; } float medFit() //median fitness { vector<float> fitness(pop.size()); for(int i =0; i < pop.size(); i++) fitness.at(i) = pop.at(i).fitness; sort(fitness.begin(),fitness.end()); if (pop.size() % 2==0) //even return fitness.at((int)floor((float)pop.size()/2)); else return (fitness.at(pop.size()/2)+fitness.at(pop.size()/2-1))/2; } float medFit_v() //median fitness { vector<float> fitness(pop.size()); for(int i =0; i < pop.size(); i++) fitness.at(i) = pop.at(i).fitness_v; sort(fitness.begin(),fitness.end()); if (pop.size() % 2==0) //even return fitness.at((int)floor((float)pop.size()/2)); else return (fitness.at(pop.size()/2)+fitness.at(pop.size()/2-1))/2; } float meanFit() // mean fitness { float answer=0; //#pragma omp parallel for reduction(+ : answer) for(int i=0; i<pop.size(); ++i) { answer+=pop.at(i).fitness; } return (float)answer/pop.size(); } float meanSize() // mean line length { float answer=0; //#pragma omp parallel for reduction(+ : answer) for(int i=0; i<pop.size(); ++i) { answer+=pop.at(i).line.size(); } return (float)answer/pop.size(); } float meanEffSize() { float answer=0; //#pragma omp parallel for reduction(+ : answer) for(int i=0; i<pop.size(); ++i) { answer+=pop.at(i).eff_size; } return 
(float)answer/pop.size(); } int medSize() // median line length { //vector<ind> tmppop = pop; sort(pop.begin(),pop.end(),SortSize()); int index = (int)floor((float)pop.size()/2); return int(pop.at(index).line.size()); } void topTen(vector <sub_ind>& eqns) //returns address to vector of equation strings { vector<sub_ind> tmppop(pop.size()); for (int i = 0;i<pop.size();++i) tmppop[i].init(pop.at(i)); //vector<ind> tmppop = pop; sort(tmppop.begin(),tmppop.end(),SortFit2()); unique(tmppop.begin(),tmppop.end(),sameFit2()); for (int i=0;i<10;++i) eqns.push_back(tmppop.at(i)); /*vector <float> fitnesses; int i=0; bool pass=true; while(eqns.size()<10 && i<pop.size()) { fitnesses.push_back(pop.at(i).fitness); for(unsigned int j=0;j<fitnesses.size()-1;++j) { if(fitnesses.at(j)==fitnesses.back()) { fitnesses.pop_back(); pass=0; break; } } if (pass) eqns.push_back(pop.at(i)); else pass=1; ++i; }*/ } void getbestsubind(sub_ind& bestind) { vector<sub_ind> subpop(pop.size()); for (int i = 0;i<pop.size();++i) subpop[i].init(pop.at(i)); //vector<ind> tmppop = pop; sort(subpop.begin(),subpop.end(),SortFit2()); bestind = subpop.front(); }// address of best individual void getbestind(ind& bestind) { //vector<ind> tmppop = pop; sort(pop.begin(),pop.end(),SortFit()); bestind = pop.front(); }// address of best individual void sortpop() { sort(pop.begin(),pop.end(),SortFit()); } void sortpop_age() { sort(pop.begin(),pop.end(),SortAge()); } void novelty(float& novelty) { // calculate novelty, where novelty is defined as the percent of unique errors in population /*vector<sub_ind> subpop(pop.size()); for (int i = 0;i<pop.size();++i) subpop[i].init(pop.at(i)); std::sort(subpop.begin(),subpop.end(),SortFit2()); subpop.erase(unique(subpop.begin(),subpop.end(),sameFit2()),subpop.end()); novelty = float(subpop.size())/float(pop.size());*/ // novelty instead defined as the number of unique error vectors vector<ind> tmppop = pop; std::sort(tmppop.begin(),tmppop.end(),SortFit()); 
tmppop.erase(unique(tmppop.begin(),tmppop.end(),sameOutput()),tmppop.end()); novelty = float(tmppop.size())/float(pop.size()); } void hom(vector<Randclass>& r, float& tot_hom, float& on_hom, float& off_hom) { tot_hom = 0; on_hom=0; off_hom=0; //float sum_strdist=0; int c1, c2, s_tot,s_on,s_off; float tot_tmp=0,on_tmp=0,off_tmp=0; int samplesize=200; std::string tmp1, tmp2, tmp1on, tmp2on, tmp1off, tmp2off; //std::string tmp2; for (int i=0; i<samplesize; ++i) { //reset temporary strings tmp1.resize(0); tmp2.resize(0); tmp1on.resize(0); tmp2on.resize(0); tmp1off.resize(0); tmp2off.resize(0); tot_tmp = 0; on_tmp = 0; off_tmp = 0; c1 = r[omp_get_thread_num()].rnd_int(0,pop.size()-1); c2 = r[omp_get_thread_num()].rnd_int(0,pop.size()-1); for (int j=pop.at(c1).line.size()-1; j>=0;--j){ if (pop.at(c1).line.at(j).type=='v') tmp1 += pop.at(c1).line.at(j).varname; else tmp1 += pop.at(c1).line.at(j).type; if(pop.at(c1).line.at(j).on){ if (pop.at(c1).line.at(j).type=='v') tmp1on += pop.at(c1).line.at(j).varname; else tmp1on += pop.at(c1).line.at(j).type; } /*else tmp1on += ' ';*/ if(!pop.at(c1).line.at(j).on){ if (pop.at(c1).line.at(j).type=='v') tmp1off += pop.at(c1).line.at(j).varname; else tmp1off += pop.at(c1).line.at(j).type; } /*else tmp1off += ' ';*/ } for (int j=pop.at(c2).line.size()-1; j>=0;--j){ if (pop.at(c2).line.at(j).type=='v') tmp2 += pop.at(c2).line.at(j).varname; else tmp2 += pop.at(c2).line.at(j).type; if(pop.at(c2).line.at(j).on){ if (pop.at(c2).line.at(j).type=='v') tmp2on += pop.at(c2).line.at(j).varname; else tmp2on += pop.at(c2).line.at(j).type; } /*else tmp2on += ' ';*/ if(!pop.at(c2).line.at(j).on){ if (pop.at(c2).line.at(j).type=='v') tmp2off += pop.at(c2).line.at(j).varname; else tmp2off += pop.at(c2).line.at(j).type; } /*else tmp2off += ' ';*/ } s_tot = strdist(tmp1,tmp2); s_on = strdist(tmp1on,tmp2on); //s_off = s_tot-s_on; if (tmp1off.size()>0 && tmp2off.size()>0) s_off = strdist(tmp1off,tmp2off); else s_off = 13785; tot_tmp = 
float(s_tot)/float(std::max(tmp1.size(),tmp2.size())); on_tmp = float(s_on)/float(std::max(tmp1on.size(),tmp2on.size())); if (s_off!= 13785) off_tmp = float(s_off)/float(std::max(tmp1off.size(),tmp2off.size())); else off_tmp = 1; tot_hom += tot_tmp; on_hom += on_tmp; off_hom += off_tmp; } tot_hom = 1-tot_hom/samplesize; on_hom = 1-on_hom/samplesize; off_hom = 1-off_hom/samplesize; } /*float on_hom(vector<Randclass>& r){ float sum_strdist=0; int c1, c2; int samplesize = 100; std::string tmp1; std::string tmp2; for (int i=0; i<samplesize; ++i) { c1 = r[omp_get_thread_num()].rnd_int(0,pop.size()-1); c2 = r[omp_get_thread_num()].rnd_int(0,pop.size()-1); for(int j=pop.at(c1).line.size()-1; j>=0;--j){ if(pop.at(c1).line.at(j).on) tmp1 += pop.at(c1).line.at(j).type; else tmp1 += ' '; } for (int j=pop.at(c2).line.size()-1; j>=0;--j){ if(pop.at(c2).line.at(j).on) tmp2 += pop.at(c2).line.at(j).type; else tmp2 += ' '; } sum_strdist += strdist(tmp1,tmp2)/float(std::max(tmp1.size(),tmp2.size())); } return 1-sum_strdist/samplesize; } float off_hom(vector<Randclass>& r){ float sum_strdist=0; int c1, c2; int samplesize=100; std::string tmp1; std::string tmp2; for (int i=0; i<samplesize; ++i) { c1 = r[omp_get_thread_num()].rnd_int(0,pop.size()-1); c2 = r[omp_get_thread_num()].rnd_int(0,pop.size()-1); for(int j=pop.at(c1).line.size(); j>0;--j){ if(!pop.at(c1).line.at(j-1).on) tmp1 += pop.at(c1).line.at(j-1).type; else tmp1 += ' '; } for (int j=pop.at(c2).line.size(); j>0;--j){ if(!pop.at(c2).line.at(j-1).on) tmp2 += pop.at(c2).line.at(j-1).type; else tmp2 += ' '; } sum_strdist += strdist(tmp1,tmp2)/float(std::max(tmp1.size(),tmp2.size())); } return 1-sum_strdist/samplesize; }*/ /* private: bool fitlow (ind& i,ind& j) { return (i.fitness<j.fitness); } bool eqncomp(ind& i,ind& j) { return (i.eqn_form.compare(j.eqn_form)==0); } bool fitcomp (ind& i,ind& j) { return (i.fitness==j.fitness); } */ private: float maxf; float minf; }; #endif
GB_binop__lxor_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_uint32)
// A*D function (colscale):         GB (_AxD__lxor_uint32)
// D*A function (rowscale):         GB (_DxB__lxor_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_uint32)
// C=scalar+B                       GB (_bind1st__lxor_uint32)
// C=scalar+B'                      GB (_bind1st_tran__lxor_uint32)
// C=A+scalar                       GB (_bind2nd__lxor_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_uint32)

// C type:     uint32_t
// A type:     uint32_t
// A pattern?  0
// B type:     uint32_t
// B pattern?  0

// BinaryOp: cij = ((aij != 0) != (bij != 0))
// (logical xor on integers: both operands normalized to boolean first)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_UINT32 || GxB_NO_LXOR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (lxor is not in that list, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
backend.c
/* Copyright 2013 Samsung R&D Institute Russia
 All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
 */

/*! @file backend.c
 *  @brief FFTF backends engine implementation.
 *  @author Markovtsev Vadim <v.markovtsev@samsung.com>
 *  @version 1.0
 *
 *  @section Notes
 *  This code partially conforms to <a href="http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml">Google C++ Style Guide</a>.
 *
 *  @section Copyright
 *  Copyright 2013 Samsung R&D Institute Russia
 */

#include "src/backend.h"
#include <assert.h>
#include <dlfcn.h>    // dlopen()/dlclose() in load_backend()
#include <pthread.h>  // mutexes guarding the global backend registry
#include <sched.h>    // sched_yield() in backend_destroy()
#include <stddef.h>   // offsetof() in unload_backend()
#include <stdio.h>    // snprintf() in load_backend()
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/types.h>
#include <unistd.h>
#include "src/engine_kiss.h"
#include "src/engine_ooura.h"
#include "src/engine_libav.h"
#ifndef __arm__
#include "src/engine_ipp.h"
#include "src/engine_mkl.h"
#endif
#ifdef GPL
#include "src/engine_fftw3.h"
#endif
#ifdef OPENCL
#include "src/engine_appml.h"
#include "src/engine_viennacl.h"
#endif
#ifdef CUDA
#include "src/engine_cufft.h"
#endif

#ifndef strdup
// Fallback for toolchains whose <string.h> does not expose strdup().
// NOTE(review): #ifndef only detects strdup when it is a macro; if the libc
// declares it as a plain function, this definition is compiled anyway --
// harmless as long as the signatures agree, but worth confirming per target.
char *strdup(const char *str) {
  size_t n = strlen(str) + 1;  // size_t, not int: strlen() returns size_t
  char *dup = malloc(n);
  if (dup) {
    strcpy(dup, str);
  }
  return dup;
}
#endif

// Default (all-empty) initializer for a Backend record; kept for symmetry
// with the explicit initializers below (currently unused in this file).
#define BACKEND_INIT(id, lib) \
  { id, lib, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, \
    NULL, NULL, NULL }

// Global registry of compiled-in backends, indexed by FFTFBackendId.
Backend Backends[FFTF_COUNT_BACKENDS] = {
  { FFTF_BACKEND_KISS, NULL, 0, NULL, NULL, init_kiss, NULL,
    calc_kiss, NULL, destroy_kiss, NULL, malloc_kiss, free_kiss, NULL, NULL },
  { FFTF_BACKEND_OOURA, NULL, 1, NULL, NULL, init_ooura, NULL,
    calc_ooura, NULL, destroy_ooura, NULL, NULL, NULL, NULL, NULL },
#ifdef GPL
  { FFTF_BACKEND_FFTW3, "libfftw3f.so.3", 0, load_fftw3, unload_fftw3,
    init_fftw3, NULL, calc_fftw3, NULL, destroy_fftw3, NULL,
    malloc_fftw3, free_fftw3, NULL, NULL },
#endif
  { FFTF_BACKEND_LIBAV, "libavcodec.so.53", 1, load_libav, unload_libav,
    init_libav, NULL, calc_libav, NULL, destroy_libav, NULL,
    malloc_libav, free_libav, NULL, NULL },
#ifndef __arm__
  { FFTF_BACKEND_IMKL, "libmkl_rt.so", 1, load_mkl, unload_mkl,
    init_mkl, NULL, calc_mkl, NULL, destroy_mkl, NULL,
    malloc_mkl, free_mkl, NULL, NULL },
  { FFTF_BACKEND_IIPP, "libipps.so", 1, load_ipp, unload_ipp,
    init_ipp, NULL, calc_ipp, NULL, destroy_ipp, NULL,
    malloc_ipp, free_ipp, NULL, NULL },
#endif
#ifdef CUDA
  { FFTF_BACKEND_CUFFT, "libcufft.so", 0, load_cufft, unload_cufft,
    NULL, init_many_cufft, NULL, calc_many_cufft, NULL,
    destroy_many_cufft, NULL, NULL, NULL, NULL },
#endif
#ifdef OPENCL
  { FFTF_BACKEND_APPML, "libclAmdFft.Runtime.so", 0, load_appml, unload_appml,
    NULL, init_many_appml, NULL, calc_many_appml, NULL,
    destroy_many_appml, NULL, NULL, NULL, NULL },
  { FFTF_BACKEND_VIENNACL, NULL, 1, NULL, NULL,
    NULL, init_many_viennacl, NULL, calc_many_viennacl, NULL,
    destroy_many_viennacl, NULL, NULL, NULL, NULL },
#endif
};

// Extra directories probed for backend shared objects (NULL-terminated).
// The array and its strings are owned by this module (copy_paths_and_libs()).
const char **BackendAdditionalPaths = NULL;

// Per-backend user-supplied library overrides; paths are owned strings.
FFTFBackend BackendAdditionalLibs[FFTF_COUNT_BACKENDS] = {
  { FFTF_BACKEND_KISS, NULL },
  { FFTF_BACKEND_OOURA, NULL },
#ifdef GPL
  { FFTF_BACKEND_FFTW3, NULL },
#endif
  { FFTF_BACKEND_LIBAV, NULL },
#ifndef __arm__
  { FFTF_BACKEND_IMKL, NULL },
  { FFTF_BACKEND_IIPP, NULL },
#endif
#ifdef CUDA
  { FFTF_BACKEND_CUFFT, NULL },
#endif
#ifdef OPENCL
  { FFTF_BACKEND_APPML, NULL },
  { FFTF_BACKEND_VIENNACL, NULL }
#endif
};

// Number of live FFTFInstance objects; guarded by InstancesMutex.
int InstancesCount = 0;
pthread_mutex_t InstancesMutex = PTHREAD_MUTEX_INITIALIZER;

/// @brief Unloads the shared library behind lib->id and wipes the dynamic
/// part of its Backend record (the byte window between libraryCurrentPath
/// and internalData; the exact field set cleared depends on the Backend
/// layout declared in backend.h).
static void unload_backend(FFTFBackend *lib) {
  pthread_mutex_lock(&InstancesMutex);
  assert(Backends[lib->id].unload != NULL);
  Backends[lib->id].unload(Backends[lib->id].internalData);
  // BUG FIX: libraryCurrentPath is an owned copy (see load_backend_internal)
  // and must be released before the record is zeroed, or it leaks.
  free((char *)Backends[lib->id].libraryCurrentPath);
  bzero(&Backends[lib->id].libraryCurrentPath,
        offsetof(Backend, internalData) -
        offsetof(Backend, libraryCurrentPath));
  pthread_mutex_unlock(&InstancesMutex);
}

/// @brief Attempts to load backend `id` from `path`.
/// On success, remembers an OWNED copy of the path in
/// Backends[id].libraryCurrentPath.
/// BUG FIX: the original stored `path` itself; load_backend() passes a stack
/// (VLA) buffer here, so the stored pointer dangled as soon as the caller's
/// loop iteration ended. Duplicate the string instead.
/// @return The (nonzero on success) result of the backend's load hook.
static int load_backend_internal(FFTFBackendId id, const char *path,
                                 int trial) {
  assert(Backends[id].load != NULL);
  pthread_mutex_lock(&InstancesMutex);
  int res = Backends[id].load(path, &Backends[id].internalData, trial);
  if (res) {
    Backends[id].libraryCurrentPath = strdup(path);
  }
  pthread_mutex_unlock(&InstancesMutex);
  return res;
}

/// @brief Loads the shared library for lib->id, trying (in order) the
/// user-supplied override, each additional search directory, and finally
/// the default path.
/// @return FFTF_SET_BACKEND_SUCCESS, FFTF_SET_BACKEND_NO_LIBS_FOUND (the
/// default library cannot even be dlopen()-ed) or
/// FFTF_SET_BACKEND_FAILED_TO_LOAD.
FFTF_SET_BACKEND_RESULT load_backend(FFTFBackend *lib, int trial) {
  assert(lib != NULL);
  // These are built-in
  if (lib->id == FFTF_BACKEND_KISS || lib->id == FFTF_BACKEND_OOURA
#ifdef OPENCL
      || lib->id == FFTF_BACKEND_VIENNACL
#endif
  ) {
    return FFTF_SET_BACKEND_SUCCESS;
  }
  assert(Backends[lib->id].load != NULL);
  // Unload the previous library
  if (Backends[lib->id].libraryCurrentPath != NULL) {
    unload_backend(lib);
  }
  int loaded = 0;
  lib->path = NULL;
  // 1) explicit per-backend override, if any
  if (BackendAdditionalLibs[lib->id].path != NULL) {
    if (load_backend_internal(lib->id, BackendAdditionalLibs[lib->id].path,
                              trial)) {
      loaded = 1;
    }
  }
  // 2) the default library name inside each additional search directory
  if (!loaded && BackendAdditionalPaths != NULL) {
    for (int i = 0; BackendAdditionalPaths[i] != NULL; i++) {
      char libpath[strlen(BackendAdditionalPaths[i]) +
                   strlen(Backends[lib->id].libraryDefaultPath) + 2];
      snprintf(libpath, sizeof(libpath), "%s/%s", BackendAdditionalPaths[i],
               Backends[lib->id].libraryDefaultPath);
      if (load_backend_internal(lib->id, libpath, trial)) {
        loaded = 1;
        break;
      }
    }
  }
  // 3) the default path via the normal dynamic-linker search
  if (!loaded) {
    if (!load_backend_internal(lib->id, Backends[lib->id].libraryDefaultPath,
                               trial)) {
      if (BackendAdditionalPaths == NULL &&
          BackendAdditionalLibs[lib->id].path == NULL) {
        // Distinguish "library absent" from "library present but broken".
        void *handle = dlopen(Backends[lib->id].libraryDefaultPath, RTLD_NOW);
        if (handle == NULL) {
          return FFTF_SET_BACKEND_NO_LIBS_FOUND;
        }
        dlclose(handle);
      }
      return FFTF_SET_BACKEND_FAILED_TO_LOAD;
    }
  }
  lib->path = Backends[lib->id].libraryCurrentPath;
  return FFTF_SET_BACKEND_SUCCESS;
}

/// @brief Replaces the module-owned copies of the user-supplied search
/// paths (NULL-terminated array) and per-backend library overrides
/// (terminated by an entry with id == FFTF_BACKEND_NONE).
static void copy_paths_and_libs(const char *const *additionalPaths,
                                const FFTFBackend *additionalLibs) {
  // free() old paths
  if (BackendAdditionalPaths != NULL) {
    for (int i = 0; BackendAdditionalPaths[i] != NULL; i++) {
      free((char *)BackendAdditionalPaths[i]);
    }
    free(BackendAdditionalPaths);
  }
  // copy new paths
  if (additionalPaths != NULL) {
    int i;
    // BUG FIX: counting started at index 1, which read one element past the
    // terminator whenever additionalPaths was empty ({ NULL }).
    for (i = 0; additionalPaths[i] != NULL; i++) {
    }
    BackendAdditionalPaths = malloc(sizeof(const char *) * (i + 1));
    for (i = 0; additionalPaths[i] != NULL; i++) {
      BackendAdditionalPaths[i] = strdup(additionalPaths[i]);
    }
    BackendAdditionalPaths[i] = NULL;
  } else {
    BackendAdditionalPaths = NULL;
  }
  // copy new libs
  if (additionalLibs != NULL) {
    int i;
    for (i = 0; additionalLibs[i].id != FFTF_BACKEND_NONE; i++) {
      assert(additionalLibs[i].path != NULL);
      if (BackendAdditionalLibs[additionalLibs[i].id].path != NULL) {
        free((char *)BackendAdditionalLibs[additionalLibs[i].id].path);
      }
      BackendAdditionalLibs[additionalLibs[i].id].path =
          strdup(additionalLibs[i].path);
    }
  }
}

/// @brief (Re)loads every implemented backend, honoring the supplied extra
/// search paths and library overrides. Must not be called while any FFTF
/// instance is alive.
void scan_backends(FFTFBackend *libs, const char *const *additionalPaths,
                   const FFTFBackend *additionalLibs) {
  pthread_mutex_lock(&InstancesMutex);
  // Do not let the backends reloading invalidate any existing FFTF instances
  assert(InstancesCount == 0);
  pthread_mutex_unlock(&InstancesMutex);
  copy_paths_and_libs(additionalPaths, additionalLibs);
  for (int i = FFTF_BACKEND_NONE + 1; i < FFTF_COUNT_BACKENDS; i++) {
    // TODO: implement all backends and remove this check
    if (Backends[i].load == NULL) continue;
    load_backend(&libs[i], 1);
  }
}

/// @brief Unloads every backend that is currently loaded.
void free_backends(FFTFBackend *libs) {
  for (int i = FFTF_BACKEND_NONE + 1; i < FFTF_COUNT_BACKENDS; i++) {
    // TODO: implement all backends and remove this check
    if (Backends[i].load == NULL) continue;
    if (Backends[i].libraryCurrentPath != NULL) {
      unload_backend(&libs[i]);
    }
  }
}

// Builds an FFTFSingleInstance initializer for batch element i of a
// (possibly batched) FFTFInstance.
#define FFTF_SINGLE_INSTANCE(instance, i) { \
  instance->id, \
  instance->internalData[i], \
  instance->type, \
  instance->direction, \
  instance->dimension, \
  instance->options, \
  instance->lengths, \
  instance->lengths[0], \
  instance->inputs[i], \
  instance->outputs[i] }

/// @brief Allocates and initializes a new FFTF instance on the backend `id`,
/// dispatching either to the backend's batched init_many hook or to its
/// per-element init hook.
/// @return The new instance; ownership passes to the caller
/// (release with backend_destroy()).
FFTFInstance *backend_init(FFTFBackendId id, FFTFType type,
                           FFTFDirection direction, FFTFDimension dimension,
                           const int *lengths, FFTFOptions options,
                           int batchSize, const float *const *inputs,
                           float *const *outputs) {
  assert((!Backends[id].only1d || dimension == FFTF_DIMENSION_1D) &&
         "Not implemented");
  pthread_mutex_lock(&InstancesMutex);
  FFTFInstance *instance = malloc(sizeof(FFTFInstance));
  instance->id = id;
  size_t ptr_table_size = sizeof(void *) * batchSize;
  instance->internalData = NULL;
  instance->batchSize = batchSize;
  instance->inputs = malloc(ptr_table_size);
  memcpy((const float **)instance->inputs, inputs, ptr_table_size);
  size_t lengths_size = sizeof(*lengths) * dimension;
  instance->lengths = malloc(lengths_size);
  memcpy((int *)instance->lengths, lengths, lengths_size);
  instance->direction = direction;
  instance->options = options;
  instance->outputs = malloc(ptr_table_size);
  memcpy((const float **)instance->outputs, outputs, ptr_table_size);
  instance->type = type;
  instance->dimension = dimension;
  // BUG FIX: pthread_mutex_init() was called INSIDE assert(); under NDEBUG
  // the whole call would be compiled out and the mutex left uninitialized.
  int lock_init_result = pthread_mutex_init(&instance->lock, NULL);
  assert(lock_init_result == 0);
  (void)lock_init_result;
  if (Backends[id].init == NULL) {
    assert(Backends[id].calc == NULL);
    assert(Backends[id].init_many != NULL);
    assert(Backends[id].calc_many != NULL);
    Backends[id].init_many(Backends[id].internalData, instance);
  } else {
    instance->internalData = malloc(ptr_table_size);
    bzero(instance->internalData, ptr_table_size);
    if (batchSize == 1) {
      FFTFSingleInstance si = FFTF_SINGLE_INSTANCE(instance, 0);
      Backends[id].init(Backends[id].internalData, &si);
      instance->internalData[0] = si.internalData;
    } else {
      for (int i = 0; i < batchSize; i++) {
        FFTFSingleInstance si = FFTF_SINGLE_INSTANCE(instance, i);
        Backends[id].init(Backends[id].internalData, &si);
        instance->internalData[i] = si.internalData;
      }
    }
  }
  InstancesCount++;
  pthread_mutex_unlock(&InstancesMutex);
  return instance;
}

/// @brief Executes the transform, dispatching to the batched calc_many hook
/// or to the per-element calc hook (parallelized over the batch with OpenMP).
void backend_calc(const FFTFInstance *instance) {
  pthread_mutex_lock((pthread_mutex_t *)&instance->lock);
  assert(instance->id != FFTF_BACKEND_NONE);
  if (Backends[instance->id].calc == NULL) {
    assert(Backends[instance->id].calc_many != NULL);
    Backends[instance->id].calc_many(Backends[instance->id].internalData,
                                     instance);
  } else if (instance->batchSize == 1) {
    FFTFSingleInstance si = FFTF_SINGLE_INSTANCE(instance, 0);
    Backends[instance->id].calc(Backends[instance->id].internalData, &si);
  } else {
    #pragma omp parallel for num_threads(fftf_get_openmp_num_threads())
    for (int i = 0; i < instance->batchSize; i++) {
      FFTFSingleInstance si = FFTF_SINGLE_INSTANCE(instance, i);
      Backends[instance->id].calc(Backends[instance->id].internalData, &si);
    }
  }
  pthread_mutex_unlock((pthread_mutex_t *)&instance->lock);
}

/// @brief Tears down an instance created by backend_init(): invokes the
/// backend's destroy hook(s), releases all owned tables and the instance
/// itself.
void backend_destroy(FFTFInstance *instance) {
  pthread_mutex_lock(&InstancesMutex);
  pthread_mutex_lock(&instance->lock);
  assert(instance->id != FFTF_BACKEND_NONE);
  if (Backends[instance->id].destroy != NULL) {
    assert(Backends[instance->id].destroy_many == NULL);
    for (int i = 0; i < instance->batchSize; i++) {
      FFTFSingleInstance si = FFTF_SINGLE_INSTANCE(instance, i);
      Backends[instance->id].destroy(Backends[instance->id].internalData,
                                     &si);
    }
  } else {
    assert(Backends[instance->id].destroy_many != NULL);
    Backends[instance->id].destroy_many(Backends[instance->id].internalData,
                                        instance);
  }
  if (Backends[instance->id].init != NULL) {
    // internalData is only allocated here for per-element (init) backends.
    free(instance->internalData);
  }
  free((const float **)instance->inputs);
  free((const float **)instance->outputs);
  free((int *)instance->lengths);
  instance->id = FFTF_BACKEND_NONE;
  pthread_mutex_unlock(&instance->lock);
  // Give a chance for any pending fftf_calc() to assert.
  sched_yield();
  pthread_mutex_destroy(&instance->lock);
  free(instance);
  InstancesCount--;
  pthread_mutex_unlock(&InstancesMutex);
}

/// @brief Allocates a buffer through the backend's allocator when it has
/// one (e.g. aligned or pinned memory), falling back to plain malloc().
void *backend_malloc(FFTFBackendId id, size_t size) {
  if (Backends[id].malloc != NULL) {
    return Backends[id].malloc(Backends[id].internalData, size);
  }
  return malloc(size);
}

/// @brief Releases a buffer obtained from backend_malloc() with the
/// matching deallocator.
void backend_free(FFTFBackendId id, void *ptr) {
  if (Backends[id].free != NULL) {
    Backends[id].free(Backends[id].internalData, ptr);
  } else {
    free(ptr);
  }
}

/// @brief For an out-of-place transform, copies the input buffer into the
/// output buffer so backends may then operate in-place on the output.
void copy_input_to_output(const FFTFSingleInstance *instance) {
  if (instance->output != instance->input) {
    // BUG FIX: the element count of a multi-dimensional buffer is the
    // PRODUCT of the per-dimension lengths, not their sum; summing only
    // coincides with the right answer in the 1D case, which is why the
    // original went unnoticed.
    int length = 1;
    for (int i = 0; i < (int)instance->dimension; i++) {
      length *= instance->lengths[i];
    }
    size_t size = length * sizeof(float) *
        (instance->type == FFTF_TYPE_COMPLEX ? 2 : 1);
    memcpy(instance->output, instance->input, size);
  }
}
GB_binop__lxor_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_01__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_03__lxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_uint32)
// A*D function (colscale):         GB (_AxD__lxor_uint32)
// D*A function (rowscale):         GB (_DxB__lxor_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_uint32)
// C=scalar+B                       GB (_bind1st__lxor_uint32)
// C=scalar+B'                      GB (_bind1st_tran__lxor_uint32)
// C=A+scalar                       GB (_bind2nd__lxor_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_uint32)

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))

// NOTE(review): each "function" below is a thin typed shell around a shared
// algorithm template, specialized for the LXOR operator on uint32_t via the
// #define macros that the #included template files expand.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// Cx [p]: the pth value of C
#define GB_CX(p) Cx [p]

// binary operator: logical xor of the nonzero-ness of the two operands
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_UINT32 || GxB_NO_LXOR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LXOR is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, left by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__lxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (LXOR is commutative, so this branch is the one compiled.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB (Bb, p): skip entries absent from the bitmap; GBX (Bx, p, iso)
        // reads the pth value (false: B is presumed non-iso here).
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) != (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__lxor_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) != (y != 0)) ; \
}

GrB_Info GB (_bind2nd_tran__lxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
a2.c
#ifdef _OPENMP
#include <omp.h>  /* only present (and only meaningful) under an OpenMP compiler */
#endif

/**
 * Computes Y := a*X + Y (saxpy) over N elements, offloading the loop to the
 * default accelerator device when OpenMP target offload is available.
 *
 * @param N number of elements in X and Y
 * @param Y in/out vector, updated in place
 * @param X input vector (read-only)
 * @param a scalar multiplier
 */
void axpy(int N, float *Y, float *X, float a) {
  int i;
  /* Keep X on the device for the region; copy Y in and back out. */
#pragma omp target data map(to: X[0:N]) map(tofrom: Y[0:N])
  /* BUG FIX: the original had no `target` construct, so the loop ran on the
   * HOST inside the `target data` region; on a discrete device the untouched
   * device copy of Y would be copied back at region end, clobbering the
   * host's results.  The inner `target` makes the loop execute on the device
   * (cf. OpenMP Examples, target_data.1.c). */
#pragma omp target
#pragma omp parallel for
  for (i = 0; i < N; ++i)
    Y[i] += a * X[i];
}
GB_unaryop__ainv_int16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_uint16
// op(A') function:  GB_tran__ainv_int16_uint16

// C type:   int16_t
// A type:   uint16_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// address of an entry of C
#define GB_CX(p) Cx [p]

// unary operator: additive inverse, computed on the casted int16_t value.
// NOTE(review): for x == INT16_MIN, -x overflows int16_t after integer
// promotion is truncated back on store; confirm the generator intends
// two's-complement wraparound here.
#define GB_OP(z, x) \
    z = -x ;

// casting: uint16_t aij is narrowed to signed int16_t before negation
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */  \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -(int16_t) aij to each of the anz entries, in parallel.
GrB_Info GB_unop__ainv_int16_uint16
(
    int16_t *restrict Cx,       // output array (dense, anz entries)
    const uint16_t *restrict Ax,// input array (dense, anz entries)
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = -(int16_t) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_int16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // size naslice
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; it expands
    // GB_CAST_OP for each entry of A while building C = A'
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__isinf_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isinf_bool_fp64) // op(A') function: GB (_unop_tran__isinf_bool_fp64) // C type: bool // A type: double // cast: double cij = (aij) // unaryop: cij = isinf (aij) #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isinf (x) ; // casting #define GB_CAST(z, aij) \ double z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (aij) ; \ Cx [pC] = isinf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isinf_bool_fp64) ( bool *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t 
p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = (aij) ; Cx [p] = isinf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = (aij) ; Cx [p] = isinf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isinf_bool_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
NormalizeIntensityImageFilter.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #ifndef BK_NORMALIZEINTENSITYIMAGEFILTER_H #define BK_NORMALIZEINTENSITYIMAGEFILTER_H #include <algorithm> namespace bk { class NormalizeIntensityImageFilter { //==================================================================================================== //===== DEFINITIONS //==================================================================================================== using self_type = NormalizeIntensityImageFilter; //==================================================================================================== //===== CONSTRUCTORS & DESTRUCTOR //==================================================================================================== public: /// @{ -------------------------------------------------- CTOR constexpr NormalizeIntensityImageFilter() = default; constexpr NormalizeIntensityImageFilter(const self_type&) = default; constexpr NormalizeIntensityImageFilter(self_type&&) noexcept = default; /// @} /// @{ -------------------------------------------------- DTOR ~NormalizeIntensityImageFilter() = default; /// @} //==================================================================================================== //===== SETTER //==================================================================================================== /// @{ -------------------------------------------------- OPERATOR = [[maybe_unused]] constexpr auto operator=(const self_type& other) -> self_type& = default; [[maybe_unused]] constexpr auto operator=(self_type&& other) noexcept -> self_type& = default; /// @} //==================================================================================================== //===== FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- APPLY template<typename TImage> [[nodiscard]] static typename TImage::template self_template_type<double> apply(const TImage& img) { typename 
TImage::template self_template_type<double> res; res.set_size(img.size()); auto[itMinVal, itMaxVal] = std::minmax_element(img.begin(), img.end()); const auto range = *itMaxVal - *itMinVal; #pragma omp parallel for for (unsigned int i = 0; i < img.num_values(); ++i) { res[i] = (img[i] - *itMinVal) / range; } return res; } /// @} }; // class NormalizeIntensityImageFilter } // namespace bk #endif //BK_NORMALIZEINTENSITYIMAGEFILTER_H
dpx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD PPPP X X % % D D P P X X % % D D PPPP XXX % % D D P X X % % DDDD P X X % % % % % % Read/Write SMTPE DPX Image Format % % % % Software Design % % John Cristy % % March 2001 % % % % % % Copyright 1999-2008 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" #include "magick/module.h" /* Typedef declaration. 
*/ typedef enum { UserDefinedColorimetric = 0, PrintingDensityColorimetric = 1, LinearColorimetric = 2, LogarithmicColorimetric = 3, UnspecifiedVideoColorimetric = 4, SMTPE_274MColorimetric = 5, ITU_R709Colorimetric = 6, ITU_R601_625LColorimetric = 7, ITU_R601_525LColorimetric = 8, NTSCCompositeVideoColorimetric = 9, PALCompositeVideoColorimetric = 10, ZDepthLinearColorimetric = 11, DepthHomogeneousColorimetric = 12 } DPXColorimetric; typedef enum { UndefinedComponentType = 0, RedComponentType = 1, GreenComponentType = 2, BlueComponentType = 3, AlphaComponentType = 4, LumaComponentType = 6, ColorDifferenceCbCrComponentType = 7, DepthComponentType = 8, CompositeVideoComponentType = 9, RGBComponentType = 50, RGBAComponentType = 51, ABGRComponentType = 52, CbYCrY422ComponentType = 100, CbYACrYA4224ComponentType = 101, CbYCr444ComponentType = 102, CbYCrA4444ComponentType = 103, UserDef2ElementComponentType = 150, UserDef3ElementComponentType = 151, UserDef4ElementComponentType = 152, UserDef5ElementComponentType = 153, UserDef6ElementComponentType = 154, UserDef7ElementComponentType = 155, UserDef8ElementComponentType = 156 } DPXComponentType; typedef struct _DPXFileInfo { unsigned int magic, image_offset; char version[8]; unsigned int file_size, ditto_key, generic_size, industry_size, user_size; char filename[100], timestamp[24], creator[100], project[200], copyright[200]; unsigned int encrypt_key; char reserve[104]; } DPXFileInfo; typedef struct _DPXFilmInfo { char id[2], type[2], offset[2], prefix[6], count[4], format[32]; unsigned int frame_position, sequence_extent, held_count; float frame_rate, shutter_angle; char frame_id[32], slate[100], reserve[56]; } DPXFilmInfo; typedef struct _DPXImageElement { unsigned int data_sign, low_data; float low_quantity; unsigned int high_data; float high_quantity; unsigned char descriptor, transfer, colorimetric, bit_size; unsigned short packing, encoding; unsigned int data_offset, end_of_line_padding, end_of_image_padding; 
unsigned char description[32]; } DPXImageElement; typedef struct _DPXImageInfo { unsigned short orientation, number_elements; unsigned int pixels_per_line, lines_per_element; DPXImageElement image_element[8]; unsigned char reserve[52]; } DPXImageInfo; typedef struct _DPXOrientationInfo { unsigned int x_offset, y_offset; float x_center, y_center; unsigned int x_size, y_size; char filename[100], timestamp[24], device[32], serial[32]; unsigned short border[4]; unsigned int aspect_ratio[2]; unsigned char reserve[28]; } DPXOrientationInfo; typedef struct _DPXTelevisionInfo { unsigned int time_code, user_bits; unsigned char interlace, field_number, video_signal, padding; float horizontal_sample_rate, vertical_sample_rate, frame_rate, time_offset, gamma, black_level, black_gain, break_point, white_level, integration_times; char reserve[76]; } DPXTelevisionInfo; typedef struct _DPXUserInfo { char id[32]; } DPXUserInfo; typedef struct DPXInfo { DPXFileInfo file; DPXImageInfo image; DPXOrientationInfo orientation; DPXFilmInfo film; DPXTelevisionInfo television; DPXUserInfo user; } DPXInfo; /* Forward declaractions. */ static MagickBooleanType WriteDPXImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D P X % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDPX() returns MagickTrue if the image format type, identified by the % magick string, is DPX. % % The format of the IsDPX method is: % % MagickBooleanType IsDPX(const unsigned char *magick,const size_t extent) % % A description of each parameter follows: % % o magick: This string is generally the first few bytes of an image file % or blob. % % o extent: Specifies the extent of the magick string. 
% */ static MagickBooleanType IsDPX(const unsigned char *magick,const size_t extent) { if (extent < 4) return(MagickFalse); if (memcmp(magick,"SDPX",4) == 0) return(MagickTrue); if (memcmp(magick,"XPDS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D P X I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDPXImage() reads an DPX X image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDPXImage method is: % % Image *ReadDPXImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static size_t GetBytesPerRow(unsigned long columns, unsigned long samples_per_pixel,unsigned long bits_per_pixel, MagickBooleanType pad) { size_t bytes_per_row; switch (bits_per_pixel) { case 1: { bytes_per_row=4*(((size_t) samples_per_pixel*columns* bits_per_pixel+31)/32); break; } case 8: default: { bytes_per_row=4*(((size_t) samples_per_pixel*columns* bits_per_pixel+31)/32); break; } case 10: { if (pad == MagickFalse) { bytes_per_row=4*(((size_t) samples_per_pixel*columns* bits_per_pixel+31)/32); break; } bytes_per_row=4*(((size_t) (32*((samples_per_pixel*columns)/3))+31)/32); break; } case 12: { if (pad == MagickFalse) { bytes_per_row=4*(((size_t) samples_per_pixel*columns* bits_per_pixel+31)/32); break; } bytes_per_row=2*(((size_t) (16*samples_per_pixel*columns)+15)/16); break; } case 16: { bytes_per_row=2*(((size_t) samples_per_pixel*columns* bits_per_pixel+8)/16); break; } case 32: { bytes_per_row=4*(((size_t) samples_per_pixel*columns* bits_per_pixel+31)/32); break; } case 64: { bytes_per_row=8*(((size_t) samples_per_pixel*columns* bits_per_pixel+63)/64); break; } 
} return(bytes_per_row); } static inline MagickBooleanType IsFloatDefined(const float value) { union { unsigned long unsigned_value; double float_value; } quantum; quantum.float_value=value; if (quantum.unsigned_value == 0U) return(MagickFalse); return(MagickTrue); } static void SetPrimaryChromaticity(const DPXColorimetric colorimetric, ChromaticityInfo *chromaticity_info) { switch(colorimetric) { case SMTPE_274MColorimetric: case ITU_R709Colorimetric: { chromaticity_info->red_primary.x=0.640; chromaticity_info->red_primary.y=0.330; chromaticity_info->red_primary.z=0.030; chromaticity_info->green_primary.x=0.300; chromaticity_info->green_primary.y=0.600; chromaticity_info->green_primary.z=0.100; chromaticity_info->blue_primary.x=0.150; chromaticity_info->blue_primary.y=0.060; chromaticity_info->blue_primary.z=0.790; chromaticity_info->white_point.x=0.3127; chromaticity_info->white_point.y=0.3290; chromaticity_info->white_point.z=0.3582; break; } case NTSCCompositeVideoColorimetric: { chromaticity_info->red_primary.x=0.67; chromaticity_info->red_primary.y=0.33; chromaticity_info->red_primary.z=0.00; chromaticity_info->green_primary.x=0.21; chromaticity_info->green_primary.y=0.71; chromaticity_info->green_primary.z=0.08; chromaticity_info->blue_primary.x=0.14; chromaticity_info->blue_primary.y=0.08; chromaticity_info->blue_primary.z=0.78; chromaticity_info->white_point.x=0.310; chromaticity_info->white_point.y=0.316; chromaticity_info->white_point.z=0.374; break; } case PALCompositeVideoColorimetric: { chromaticity_info->red_primary.x=0.640; chromaticity_info->red_primary.y=0.330; chromaticity_info->red_primary.z=0.030; chromaticity_info->green_primary.x=0.290; chromaticity_info->green_primary.y=0.600; chromaticity_info->green_primary.z=0.110; chromaticity_info->blue_primary.x=0.150; chromaticity_info->blue_primary.y=0.060; chromaticity_info->blue_primary.z=0.790; chromaticity_info->white_point.x=0.3127; chromaticity_info->white_point.y=0.3290; 
chromaticity_info->white_point.z=0.3582; break; } default: break; } } static void TimeCodeToString(const unsigned long timestamp,char *code) { #define TimeFields 7 unsigned int shift; register long i; *code='\0'; shift=4*TimeFields; for (i=0; i <= TimeFields; i++) { (void) FormatMagickString(code,MaxTextExtent-strlen(code),"%01u", (unsigned int) ((timestamp >> shift) & 0x0fU)); code++; if (((i % 2) != 0) && (i < TimeFields)) *code++=':'; shift-=4; *code='\0'; } } static Image *ReadDPXImage(const ImageInfo *image_info,ExceptionInfo *exception) { char magick[4], value[MaxTextExtent]; DPXInfo dpx; Image *image; long y; MagickBooleanType status; MagickOffsetType offset; QuantumType quantum_type; register long i; register PixelPacket *q; ssize_t count; size_t extent; unsigned char component_type; unsigned long samples_per_pixel; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read DPX file header. 
*/ offset=0; count=ReadBlob(image,4,(unsigned char *) magick); offset+=count; if ((count != 4) || ((LocaleNCompare(magick,"SDPX",4) != 0) && (LocaleNCompare((char *) magick,"XPDS",4) != 0))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); image->endian=LSBEndian; if (LocaleNCompare(magick,"SDPX",4) == 0) image->endian=MSBEndian; (void) ResetMagickMemory(&dpx,0,sizeof(dpx)); dpx.file.image_offset=ReadBlobLong(image); offset+=4; offset+=ReadBlob(image,sizeof(dpx.file.version),(unsigned char *) dpx.file.version); (void) FormatImageProperty(image,"dpx:file.version","%.8s",dpx.file.version); dpx.file.file_size=ReadBlobLong(image); offset+=4; dpx.file.ditto_key=ReadBlobLong(image); offset+=4; if (dpx.file.ditto_key != ~0U) (void) FormatImageProperty(image,"dpx:file.ditto.key","%u", dpx.file.ditto_key); dpx.file.generic_size=ReadBlobLong(image); offset+=4; dpx.file.industry_size=ReadBlobLong(image); offset+=4; dpx.file.user_size=ReadBlobLong(image); offset+=4; offset+=ReadBlob(image,sizeof(dpx.file.filename),(unsigned char *) dpx.file.filename); (void) FormatImageProperty(image,"dpx:file.filename","%.100s", dpx.file.filename); (void) FormatImageProperty(image,"document","%.100s",dpx.file.filename); offset+=ReadBlob(image,sizeof(dpx.file.timestamp),(unsigned char *) dpx.file.timestamp); if (*dpx.file.timestamp != '\0') (void) FormatImageProperty(image,"dpx:file.timestamp","%.24s", dpx.file.timestamp); offset+=ReadBlob(image,sizeof(dpx.file.creator),(unsigned char *) dpx.file.creator); if (*dpx.file.creator != '\0') { (void) FormatImageProperty(image,"dpx:file.creator","%.100s", dpx.file.creator); (void) FormatImageProperty(image,"software","%.100s",dpx.file.creator); } offset+=ReadBlob(image,sizeof(dpx.file.project),(unsigned char *) dpx.file.project); if (*dpx.file.project != '\0') { (void) FormatImageProperty(image,"dpx:file.project","%.200s", dpx.file.project); (void) FormatImageProperty(image,"comment","%.100s",dpx.file.project); } 
offset+=ReadBlob(image,sizeof(dpx.file.copyright),(unsigned char *) dpx.file.copyright); if (*dpx.file.copyright != '\0') { (void) FormatImageProperty(image,"dpx:file.copyright","%.200s", dpx.file.copyright); (void) FormatImageProperty(image,"copyright","%.100s", dpx.file.copyright); } dpx.file.encrypt_key=ReadBlobLong(image); offset+=4; if (dpx.file.encrypt_key != ~0U) (void) FormatImageProperty(image,"dpx:file.encrypt_key","%u", dpx.file.encrypt_key); offset+=ReadBlob(image,sizeof(dpx.file.reserve),(unsigned char *) dpx.file.reserve); /* Read DPX image header. */ dpx.image.orientation=ReadBlobShort(image); offset+=2; if (dpx.image.orientation != (unsigned short) (~0U)) (void) FormatImageProperty(image,"dpx:image.orientation","%d", dpx.image.orientation); switch (dpx.image.orientation) { default: case 0: image->orientation=TopLeftOrientation; break; case 1: image->orientation=TopRightOrientation; break; case 2: image->orientation=BottomLeftOrientation; break; case 3: image->orientation=BottomRightOrientation; break; case 4: image->orientation=LeftTopOrientation; break; case 5: image->orientation=RightTopOrientation; break; case 6: image->orientation=LeftBottomOrientation; break; case 7: image->orientation=RightBottomOrientation; break; } dpx.image.number_elements=ReadBlobShort(image); offset+=2; dpx.image.pixels_per_line=ReadBlobLong(image); offset+=4; image->columns=dpx.image.pixels_per_line; dpx.image.lines_per_element=ReadBlobLong(image); offset+=4; image->rows=dpx.image.lines_per_element; for (i=0; i < 8; i++) { dpx.image.image_element[i].data_sign=ReadBlobLong(image); offset+=4; dpx.image.image_element[i].low_data=ReadBlobLong(image); offset+=4; dpx.image.image_element[i].low_quantity=ReadBlobFloat(image); offset+=4; dpx.image.image_element[i].high_data=ReadBlobLong(image); offset+=4; dpx.image.image_element[i].high_quantity=ReadBlobFloat(image); offset+=4; dpx.image.image_element[i].descriptor=(unsigned char) ReadBlobByte(image); offset++; 
dpx.image.image_element[i].transfer=(unsigned char) ReadBlobByte(image); offset++; dpx.image.image_element[i].colorimetric=(unsigned char) ReadBlobByte(image); offset++; dpx.image.image_element[i].bit_size=(unsigned char) ReadBlobByte(image); offset++; dpx.image.image_element[i].packing=ReadBlobShort(image); offset+=2; dpx.image.image_element[i].encoding=ReadBlobShort(image); offset+=2; dpx.image.image_element[i].data_offset=ReadBlobLong(image); offset+=4; dpx.image.image_element[i].end_of_line_padding=ReadBlobLong(image); offset+=4; dpx.image.image_element[i].end_of_image_padding=ReadBlobLong(image); offset+=4; offset+=ReadBlob(image,sizeof(dpx.image.image_element[i].description), (unsigned char *) dpx.image.image_element[i].description); } SetPrimaryChromaticity((DPXColorimetric) dpx.image.image_element[0].colorimetric,&image->chromaticity); offset+=ReadBlob(image,sizeof(dpx.image.reserve),(unsigned char *) dpx.image.reserve); component_type=dpx.image.image_element[0].descriptor; image->depth=dpx.image.image_element[0].bit_size; if (dpx.file.image_offset >= 1664U) { /* Read DPX orientation header. 
*/ dpx.orientation.x_offset=ReadBlobLong(image); offset+=4; if (dpx.orientation.x_offset != ~0U) (void) FormatImageProperty(image,"dpx:orientation.x_offset","%u", dpx.orientation.x_offset); dpx.orientation.y_offset=ReadBlobLong(image); offset+=4; if (dpx.orientation.y_offset != ~0U) (void) FormatImageProperty(image,"dpx:orientation.y_offset","%u", dpx.orientation.y_offset); dpx.orientation.x_center=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.orientation.x_center)!= MagickFalse) (void) FormatImageProperty(image,"dpx:orientation.x_center","%g", dpx.orientation.x_center); dpx.orientation.y_center=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.orientation.y_center)!= MagickFalse) (void) FormatImageProperty(image,"dpx:orientation.y_center","%g", dpx.orientation.y_center); dpx.orientation.x_size=ReadBlobLong(image); offset+=4; if (dpx.orientation.x_size != ~0U) (void) FormatImageProperty(image,"dpx:orientation.x_size","%u", dpx.orientation.x_size); dpx.orientation.y_size=ReadBlobLong(image); offset+=4; if (dpx.orientation.y_size != ~0U) (void) FormatImageProperty(image,"dpx:orientation.y_size","%u", dpx.orientation.y_size); offset+=ReadBlob(image,sizeof(dpx.orientation.filename),(unsigned char *) dpx.orientation.filename); if (*dpx.orientation.filename != '\0') (void) FormatImageProperty(image,"dpx:orientation.filename","%.100s", dpx.orientation.filename); offset+=ReadBlob(image,sizeof(dpx.orientation.timestamp),(unsigned char *) dpx.orientation.timestamp); if (*dpx.orientation.timestamp != '\0') (void) FormatImageProperty(image,"dpx:orientation.timestamp","%.24s", dpx.orientation.timestamp); offset+=ReadBlob(image,sizeof(dpx.orientation.device),(unsigned char *) dpx.orientation.device); if (*dpx.orientation.device != '\0') (void) FormatImageProperty(image,"dpx:orientation.device","%.32s", dpx.orientation.device); offset+=ReadBlob(image,sizeof(dpx.orientation.serial),(unsigned char *) dpx.orientation.serial); if (*dpx.orientation.serial != '\0') 
(void) FormatImageProperty(image,"dpx:orientation.serial","%.32s", dpx.orientation.serial); for (i=0; i < 4; i++) { dpx.orientation.border[i]=ReadBlobShort(image); offset+=2; } if ((dpx.orientation.border[0] != (unsigned short) (~0U)) && (dpx.orientation.border[1] != (unsigned short) (~0U))) (void) FormatImageProperty(image,"dpx:orientation.border","%dx%d%+d%+d", dpx.orientation.border[0],dpx.orientation.border[1], dpx.orientation.border[2],dpx.orientation.border[3]); for (i=0; i < 2; i++) { dpx.orientation.aspect_ratio[i]=ReadBlobLong(image); offset+=4; } if ((dpx.orientation.aspect_ratio[0] != ~0U) && (dpx.orientation.aspect_ratio[1] != ~0U)) (void) FormatImageProperty(image,"dpx:orientation.aspect_ratio", "%ux%u",dpx.orientation.aspect_ratio[0], dpx.orientation.aspect_ratio[1]); offset+=ReadBlob(image,sizeof(dpx.orientation.reserve),(unsigned char *) dpx.orientation.reserve); } if (dpx.file.image_offset >= 1920U) { /* Read DPX film header. */ offset+=ReadBlob(image,sizeof(dpx.film.id),(unsigned char *) dpx.film.id); if (*dpx.film.type != '\0') (void) FormatImageProperty(image,"dpx:film.id","%.2s",dpx.film.id); offset+=ReadBlob(image,sizeof(dpx.film.type),(unsigned char *) dpx.film.type); if (*dpx.film.type != '\0') (void) FormatImageProperty(image,"dpx:film.type","%.2s",dpx.film.type); offset+=ReadBlob(image,sizeof(dpx.film.offset),(unsigned char *) dpx.film.offset); if (*dpx.film.offset != '\0') (void) FormatImageProperty(image,"dpx:film.offset","%.2s", dpx.film.offset); offset+=ReadBlob(image,sizeof(dpx.film.prefix),(unsigned char *) dpx.film.prefix); if (*dpx.film.prefix != '\0') (void) FormatImageProperty(image,"dpx:film.prefix","%.6s", dpx.film.prefix); offset+=ReadBlob(image,sizeof(dpx.film.count),(unsigned char *) dpx.film.count); if (*dpx.film.count != '\0') (void) FormatImageProperty(image,"dpx:film.count","%.4s", dpx.film.count); offset+=ReadBlob(image,sizeof(dpx.film.format),(unsigned char *) dpx.film.format); if (*dpx.film.format != '\0') (void) 
FormatImageProperty(image,"dpx:film.format","%.4s", dpx.film.format); dpx.film.frame_position=ReadBlobLong(image); offset+=4; if (dpx.film.frame_position != ~0U) (void) FormatImageProperty(image,"dpx:film.frame_position","%u", dpx.film.frame_position); dpx.film.sequence_extent=ReadBlobLong(image); offset+=4; if (dpx.film.sequence_extent != ~0U) (void) FormatImageProperty(image,"dpx:film.sequence_extent","%u", dpx.film.sequence_extent); dpx.film.held_count=ReadBlobLong(image); offset+=4; if (dpx.film.held_count != ~0U) (void) FormatImageProperty(image,"dpx:film.held_count","%u", dpx.film.held_count); dpx.film.frame_rate=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.film.frame_rate)!= MagickFalse) (void) FormatImageProperty(image,"dpx:film.frame_rate","%g", dpx.film.frame_rate); dpx.film.shutter_angle=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.film.shutter_angle)!= MagickFalse) (void) FormatImageProperty(image,"dpx:film.shutter_angle","%g", dpx.film.shutter_angle); offset+=ReadBlob(image,sizeof(dpx.film.frame_id),(unsigned char *) dpx.film.frame_id); if (*dpx.film.frame_id != '\0') (void) FormatImageProperty(image,"dpx:film.frame_id","%.32s", dpx.film.frame_id); offset+=ReadBlob(image,sizeof(dpx.film.slate),(unsigned char *) dpx.film.slate); if (*dpx.film.slate != '\0') (void) FormatImageProperty(image,"dpx:film.slate","%.100s", dpx.film.slate); offset+=ReadBlob(image,sizeof(dpx.film.reserve),(unsigned char *) dpx.film.reserve); } if (dpx.file.image_offset >= 2048U) { /* Read DPX television header. 
*/ dpx.television.time_code=ReadBlobLong(image); offset+=4; TimeCodeToString(dpx.television.time_code,value); (void) SetImageProperty(image,"dpx:television.time.code",value); dpx.television.user_bits=(unsigned char) ReadBlobLong(image); offset+=4; TimeCodeToString(dpx.television.user_bits,value); (void) SetImageProperty(image,"dpx:television.user.bits",value); dpx.television.interlace=(unsigned char) ReadBlobByte(image); offset++; if (dpx.television.interlace != 0) (void) FormatImageProperty(image,"dpx:television.interlace","%ld",(long) dpx.television.interlace); dpx.television.field_number=(unsigned char) ReadBlobByte(image); offset++; if (dpx.television.field_number != 0) (void) FormatImageProperty(image,"dpx:television.field_number","%ld", (long) dpx.television.field_number); dpx.television.video_signal=(unsigned char) ReadBlobByte(image); offset++; if (dpx.television.video_signal != 0) (void) FormatImageProperty(image,"dpx:television.video_signal","%ld", (long) dpx.television.video_signal); dpx.television.padding=(unsigned char) ReadBlobByte(image); offset++; if (dpx.television.padding != 0) (void) FormatImageProperty(image,"dpx:television.padding","%d", dpx.television.padding); dpx.television.horizontal_sample_rate=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.horizontal_sample_rate)!= MagickFalse) (void) FormatImageProperty(image, "dpx:television.horizontal_sample_rate","%g", dpx.television.horizontal_sample_rate); dpx.television.vertical_sample_rate=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.vertical_sample_rate)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.vertical_sample_rate", "%g",dpx.television.vertical_sample_rate); dpx.television.frame_rate=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.frame_rate)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.frame_rate","%g", dpx.television.frame_rate); dpx.television.time_offset=ReadBlobFloat(image); offset+=4; if 
(IsFloatDefined(dpx.television.time_offset)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.time_offset","%g", dpx.television.time_offset); dpx.television.gamma=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.gamma)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.gamma","%g", dpx.television.gamma); dpx.television.black_level=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.black_level)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.black_level","%g", dpx.television.black_level); dpx.television.black_gain=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.black_gain)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.black_gain","%g", dpx.television.black_gain); dpx.television.break_point=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.break_point)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.break_point","%g", dpx.television.break_point); dpx.television.white_level=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.white_level)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.white_level","%g", dpx.television.white_level); dpx.television.integration_times=ReadBlobFloat(image); offset+=4; if (IsFloatDefined(dpx.television.integration_times)!= MagickFalse) (void) FormatImageProperty(image,"dpx:television.integration_times", "%g",dpx.television.integration_times); offset+=ReadBlob(image,sizeof(dpx.television.reserve),(unsigned char *) dpx.television.reserve); } if (dpx.file.image_offset > 2080U) { /* Read DPX user header. 
*/ offset+=ReadBlob(image,sizeof(dpx.user.id),(unsigned char *) dpx.user.id); if (*dpx.user.id != '\0') (void) FormatImageProperty(image,"dpx:user.id","%.32s",dpx.user.id); if ((dpx.file.user_size != ~0U) && ((size_t) dpx.file.user_size > sizeof(dpx.user.id))) { StringInfo *profile; profile=AcquireStringInfo(dpx.file.user_size-sizeof(dpx.user.id)); offset+=ReadBlob(image,GetStringInfoLength(profile), GetStringInfoDatum(profile)); (void) SetImageProfile(image,"dpx:user.data",profile); profile=DestroyStringInfo(profile); } } for ( ; offset < (long) dpx.file.image_offset; offset++) (void) ReadBlobByte(image); /* Read DPX image header. */ if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Convert DPX raster image to pixel packets. */ samples_per_pixel=1; quantum_type=GrayQuantum; switch (component_type) { case CbYCrY422ComponentType: { samples_per_pixel=2; quantum_type=CbYCrYQuantum; break; } case CbYACrYA4224ComponentType: case CbYCr444ComponentType: case RGBComponentType: { samples_per_pixel=3; quantum_type=RGBQuantum; break; } case ABGRComponentType: case RGBAComponentType: { image->matte=MagickTrue; samples_per_pixel=4; quantum_type=RGBAQuantum; break; } default: break; } switch (component_type) { case CbYCrY422ComponentType: case CbYACrYA4224ComponentType: case CbYCr444ComponentType: { image->colorspace=Rec709YCbCrColorspace; break; } case LumaComponentType: { image->colorspace=RGBColorspace; break; } default: { image->colorspace=RGBColorspace; if (dpx.image.image_element[0].transfer == LogarithmicColorimetric) image->colorspace=LogColorspace; if (dpx.image.image_element[0].transfer == PrintingDensityColorimetric) image->colorspace=LogColorspace; break; } } extent=GetBytesPerRow(image->columns,samples_per_pixel,image->depth, dpx.image.image_element[0].packing == 0 ? 
MagickFalse : MagickTrue); if ((quantum_type == GrayQuantum) && (dpx.image.image_element[0].packing != 0) && (image->depth == 10) && (image->endian == MSBEndian)) { QuantumAny scale; QuantumInfo *quantum_info; register long x; unsigned char *pixels; unsigned long pixel; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); SetQuantumQuantum(quantum_info,32); SetQuantumPack(quantum_info,dpx.image.image_element[0].packing == 0 ? MagickTrue : MagickFalse); pixels=GetQuantumPixels(quantum_info); pixel=0U; i=0; scale=GetQuantumScale(image->depth); for (y=0; y < (long) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (long) image->columns; x++) { switch (i++ % 3) { case 0: { pixel=ReadBlobMSBLong(image); q->red=ScaleAnyToQuantum((pixel >> 0) & 0x3ff,image->depth,scale); break; } case 1: { q->red=ScaleAnyToQuantum((pixel >> 10) & 0x3ff,image->depth, scale); break; } case 2: { q->red=ScaleAnyToQuantum((pixel >> 20) & 0x3ff,image->depth, scale); break; } } q->green=q->red; q->blue=q->red; q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,y,image->rows); if (status == MagickFalse) break; } quantum_info=DestroyQuantumInfo(quantum_info); } else { long row; QuantumInfo *quantum_info; ViewInfo *image_view; /* DPX any-bit pixel format. */ status=MagickTrue; row=0; quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); SetQuantumQuantum(quantum_info,32); SetQuantumPack(quantum_info,dpx.image.image_element[0].packing == 0 ? 
MagickTrue : MagickFalse); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) shared(row,status,quantum_type) #endif for (y=0; y < (long) image->rows; y++) { long offset; MagickBooleanType sync; register PixelPacket *q; ssize_t count; size_t length; unsigned char *pixels; if (status == MagickFalse) continue; pixels=GetQuantumPixels(quantum_info); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical #endif { count=ReadBlob(image,extent,pixels); if ((image->progress_monitor != (MagickProgressMonitor) NULL) && (image->previous == (Image *) NULL)) { MagickBooleanType proceed; proceed=SetImageProgress(image,LoadImageTag,row,image->rows); if (proceed == MagickFalse) status=MagickFalse; } offset=row++; } if (count != (ssize_t) extent) status=MagickFalse; q=QueueCacheViewAuthenticPixels(image_view,0,offset,image->columns, 1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } length=ImportQuantumPixels(image,image_view,quantum_info,quantum_type, pixels,exception); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); quantum_info=DestroyQuantumInfo(quantum_info); if (status == MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); } SetQuantumImageType(image,quantum_type); if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D P X I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDPXImage() adds properties for the DPX image format to % the list of supported formats. 
The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterDPXImage method is: % % unsigned long RegisterDPXImage(void) % */ ModuleExport unsigned long RegisterDPXImage(void) { MagickInfo *entry; static const char *DPXNote = { "Digital Moving Picture Exchange Bitmap, Version 2.0.\n" "See SMPTE 268M-2003 specification at http://www.smtpe.org\n" }; entry=SetMagickInfo("DPX"); entry->decoder=(DecodeImageHandler *) ReadDPXImage; entry->encoder=(EncodeImageHandler *) WriteDPXImage; entry->magick=(IsImageFormatHandler *) IsDPX; entry->description=ConstantString("SMPTE 268M-2003 (DPX 2.0)"); entry->note=ConstantString(DPXNote); entry->module=ConstantString("DPX"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D P X I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDPXImage() removes format registrations made by the % DPX module from the list of supported formats. % % The format of the UnregisterDPXImage method is: % % UnregisterDPXImage(void) % */ ModuleExport void UnregisterDPXImage(void) { (void) UnregisterMagickInfo("DPX"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D P X I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDPXImage() writes an image in DPX encoded image format. % % The format of the WriteDPXImage method is: % % MagickBooleanType WriteDPXImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. 
%
%    o image: The image.
%
*/

/*
  GetDPXProperty() returns the value for a DPX property.  An image option
  (set on the command line via image_info) takes precedence over a property
  stored in the image itself; returns NULL if neither is defined.
*/
static inline const char *GetDPXProperty(const ImageInfo *image_info,
  const Image *image,const char *property)
{
  const char
    *value;

  value=GetImageOption(image_info,property);
  if (value != (const char *) NULL)
    return(value);
  return(GetImageProperty(image,property));
}

/*
  DecodeSMPTEBits() packs the decimal digits of an SMPTE time-code string
  (e.g. "01:02:03:04") into a 32-bit BCD value, 4 bits per digit, most
  significant digit first.  Non-digit separators are skipped but still
  count toward the 11-character scan limit.
*/
static unsigned int DecodeSMPTEBits(const char *key)
{
  char
    buffer[2];

  register long
    i;

  unsigned int
    shift,
    value;

  value=0;
  shift=28;
  buffer[1]='\0';
  for (i=0; (*key != 0) && (i < 11); i++)
  {
    if (isdigit((int) ((unsigned char) *key)) == 0)
      {
        key++;
        continue;
      }
    buffer[0]=(*key++);
    value|=(unsigned int) ((strtol(buffer,(char **) NULL,10)) << shift);
    /*
      Only 8 nibbles fit in 32 bits.  Stop before the unsigned shift count
      wraps below zero (0-4 would wrap to a huge value and a subsequent
      shift by >= 32 bits is undefined behavior).
    */
    if (shift < 4)
      break;
    shift-=4;
  }
  return(value);
}

/*
  WriteDPXImage() writes the pixels of `image' in SMPTE 268M-2003 DPX
  format: a 0x2000-byte (or larger, if a "dpx:user.data" profile is
  attached) header followed by 32-bit-word-aligned raster data.  Most
  header fields are populated from "dpx:*" image options/properties via
  GetDPXProperty().  Returns MagickTrue on success.
*/
static MagickBooleanType WriteDPXImage(const ImageInfo *image_info,Image *image)
{
  const char
    *value;

  const StringInfo
    *profile;

  DPXInfo
    dpx;

  long
    y;

  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  MagickStatusType
    flags;

  GeometryInfo
    geometry_info;

  QuantumInfo
    *quantum_info;

  QuantumType
    quantum_type;

  register const PixelPacket
    *p;

  register long
    i;

  size_t
    extent;

  ssize_t
    count;

  time_t
    seconds;

  unsigned char
    *pixels;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  /*
    Write file header.  The struct was zeroed, so any field not explicitly
    assigned below is written as zero.
  */
  (void) ResetMagickMemory(&dpx,0,sizeof(dpx));
  offset=0;
  dpx.file.magic=0x53445058U;  /* "SDPX": big-endian magic */
  offset+=WriteBlobLong(image,dpx.file.magic);
  dpx.file.image_offset=0x2000U;
  profile=GetImageProfile(image,"dpx:user.data");
  if (profile != (StringInfo *) NULL)
    {
      /*
        Grow the image offset to hold the user data, rounded up to the next
        0x2000-byte boundary.
      */
      dpx.file.image_offset+=(unsigned int) GetStringInfoLength(profile);
      dpx.file.image_offset=(((dpx.file.image_offset+0x2000-1)/0x2000)*0x2000);
    }
  offset+=WriteBlobLong(image,dpx.file.image_offset);
  (void) strncpy(dpx.file.version,"V2.0",sizeof(dpx.file.version));
  offset+=WriteBlob(image,8,(unsigned char *) &dpx.file.version);
  dpx.file.file_size=(unsigned int) (4U*image->columns*image->rows+
    dpx.file.image_offset);
  offset+=WriteBlobLong(image,dpx.file.file_size);
  dpx.file.ditto_key=1U;  /* new frame */
  offset+=WriteBlobLong(image,dpx.file.ditto_key);
  dpx.file.generic_size=0x00000680U;
  offset+=WriteBlobLong(image,dpx.file.generic_size);
  dpx.file.industry_size=0x00000180U;
  offset+=WriteBlobLong(image,dpx.file.industry_size);
  dpx.file.user_size=0;
  if (profile != (StringInfo *) NULL)
    {
      dpx.file.user_size+=(unsigned int) GetStringInfoLength(profile);
      dpx.file.user_size=(((dpx.file.user_size+0x2000-1)/0x2000)*0x2000);
    }
  offset+=WriteBlobLong(image,dpx.file.user_size);
  value=GetDPXProperty(image_info,image,"dpx:file.filename");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.file.filename,value,sizeof(dpx.file.filename));
  offset+=WriteBlob(image,sizeof(dpx.file.filename),(unsigned char *)
    dpx.file.filename);
  seconds=time((time_t *) NULL);
  (void) FormatMagickTime(seconds,sizeof(dpx.file.timestamp),
    dpx.file.timestamp);
  offset+=WriteBlob(image,sizeof(dpx.file.timestamp),(unsigned char *)
    dpx.file.timestamp);
  (void) strncpy(dpx.file.creator,GetMagickVersion((unsigned long *) NULL),
    sizeof(dpx.file.creator));
  value=GetDPXProperty(image_info,image,"dpx:file.creator");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.file.creator,value,sizeof(dpx.file.creator));
  offset+=WriteBlob(image,sizeof(dpx.file.creator),(unsigned char *)
    dpx.file.creator);
  value=GetDPXProperty(image_info,image,"dpx:file.project");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.file.project,value,sizeof(dpx.file.project));
  offset+=WriteBlob(image,sizeof(dpx.file.project),(unsigned char *)
    dpx.file.project);
  value=GetDPXProperty(image_info,image,"dpx:file.copyright");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.file.copyright,value,
      sizeof(dpx.file.copyright));
  offset+=WriteBlob(image,sizeof(dpx.file.copyright),(unsigned char *)
    dpx.file.copyright);
  dpx.file.encrypt_key=(~0U);  /* ~0 == not encrypted */
  offset+=WriteBlobLong(image,dpx.file.encrypt_key);
  offset+=WriteBlob(image,sizeof(dpx.file.reserve),(unsigned char *)
    dpx.file.reserve);
  /*
    Write image header.  Only element 0 carries real data; the remaining 7
    element slots are written with default (mostly zero) values.
  */
  dpx.image.orientation=0x00;  /* left-to-right; top-to-bottom */
  offset+=WriteBlobShort(image,dpx.image.orientation);
  dpx.image.number_elements=1;
  offset+=WriteBlobShort(image,dpx.image.number_elements);
  offset+=WriteBlobLong(image,(unsigned int) image->columns);
  offset+=WriteBlobLong(image,(unsigned int) image->rows);
  for (i=0; i < 8; i++)
  {
    dpx.image.image_element[i].data_sign=0U;
    offset+=WriteBlobLong(image,dpx.image.image_element[i].data_sign);
    dpx.image.image_element[i].low_data=0U;
    offset+=WriteBlobLong(image,dpx.image.image_element[i].low_data);
    dpx.image.image_element[i].low_quantity=0.0f;
    offset+=WriteBlobFloat(image,dpx.image.image_element[i].low_quantity);
    dpx.image.image_element[i].high_data=0U;
    offset+=WriteBlobLong(image,dpx.image.image_element[i].high_data);
    dpx.image.image_element[i].high_quantity=0.0f;
    offset+=WriteBlobFloat(image,dpx.image.image_element[i].high_quantity);
    dpx.image.image_element[i].descriptor=0;
    if (i == 0)
      dpx.image.image_element[i].descriptor=RGBComponentType;
    offset+=WriteBlobByte(image,dpx.image.image_element[i].descriptor);
    dpx.image.image_element[i].transfer=0;
    if (image->colorspace == LogColorspace)
      dpx.image.image_element[i].transfer=PrintingDensityColorimetric;  /* was [0]: loop-index bug */
    offset+=WriteBlobByte(image,dpx.image.image_element[i].transfer);
    dpx.image.image_element[i].colorimetric=0;
    offset+=WriteBlobByte(image,dpx.image.image_element[i].colorimetric);
    dpx.image.image_element[i].bit_size=0;
    if (i == 0)
      dpx.image.image_element[i].bit_size=(unsigned char) image->depth;
    offset+=WriteBlobByte(image,dpx.image.image_element[i].bit_size);
    dpx.image.image_element[i].packing=0;
    if ((image->depth == 10) || (image->depth == 12))
      dpx.image.image_element[i].packing=1;  /* pad to 32-bit word boundaries */
    offset+=WriteBlobShort(image,dpx.image.image_element[i].packing);
    dpx.image.image_element[i].encoding=0;
    offset+=WriteBlobShort(image,dpx.image.image_element[i].encoding);
    dpx.image.image_element[i].data_offset=0U;
    if (i == 0)
      dpx.image.image_element[i].data_offset=dpx.file.image_offset;
    offset+=WriteBlobLong(image,dpx.image.image_element[i].data_offset);
    dpx.image.image_element[i].end_of_line_padding=0U;
    offset+=WriteBlobLong(image,dpx.image.image_element[i].end_of_line_padding);
    offset+=WriteBlobLong(image,
      dpx.image.image_element[i].end_of_image_padding);
    offset+=WriteBlob(image,sizeof(dpx.image.image_element[i].description),
      (unsigned char *) dpx.image.image_element[i].description);
  }
  offset+=WriteBlob(image,sizeof(dpx.image.reserve),(unsigned char *)
    dpx.image.reserve);
  /*
    Write orientation header.
  */
  if ((image->rows != image->magick_rows) ||
      (image->columns != image->magick_columns))
    {
      /*
        These properties are not valid if image size changed.
      */
      (void) DeleteImageProperty(image,"dpx:orientation.x_offset");
      (void) DeleteImageProperty(image,"dpx:orientation.y_offset");
      (void) DeleteImageProperty(image,"dpx:orientation.x_center");
      (void) DeleteImageProperty(image,"dpx:orientation.y_center");
      (void) DeleteImageProperty(image,"dpx:orientation.x_size");
      (void) DeleteImageProperty(image,"dpx:orientation.y_size");
    }
  dpx.orientation.x_offset=0U;
  value=GetDPXProperty(image_info,image,"dpx:orientation.x_offset");
  if (value != (const char *) NULL)
    dpx.orientation.x_offset=(unsigned int) atoi(value);
  offset+=WriteBlobLong(image,dpx.orientation.x_offset);
  dpx.orientation.y_offset=0U;
  value=GetDPXProperty(image_info,image,"dpx:orientation.y_offset");
  if (value != (const char *) NULL)
    dpx.orientation.y_offset=(unsigned int) atoi(value);
  offset+=WriteBlobLong(image,dpx.orientation.y_offset);
  dpx.orientation.x_center=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:orientation.x_center");
  if (value != (const char *) NULL)
    dpx.orientation.x_center=atof(value);
  offset+=WriteBlobFloat(image,dpx.orientation.x_center);
  dpx.orientation.y_center=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:orientation.y_center");
  if (value != (const char *) NULL)
    dpx.orientation.y_center=atof(value);
  offset+=WriteBlobFloat(image,dpx.orientation.y_center);
  dpx.orientation.x_size=0U;
  value=GetDPXProperty(image_info,image,"dpx:orientation.x_size");
  if (value != (const char *) NULL)
    dpx.orientation.x_size=(unsigned int) atoi(value);
  offset+=WriteBlobLong(image,dpx.orientation.x_size);
  dpx.orientation.y_size=0U;
  value=GetDPXProperty(image_info,image,"dpx:orientation.y_size");
  if (value != (const char *) NULL)
    dpx.orientation.y_size=(unsigned int) atoi(value);
  offset+=WriteBlobLong(image,dpx.orientation.y_size);
  value=GetDPXProperty(image_info,image,"dpx:orientation.filename");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.orientation.filename,value,
      sizeof(dpx.orientation.filename));
  offset+=WriteBlob(image,sizeof(dpx.orientation.filename),(unsigned char *)
    dpx.orientation.filename);
  offset+=WriteBlob(image,sizeof(dpx.orientation.timestamp),(unsigned char *)
    dpx.orientation.timestamp);
  value=GetDPXProperty(image_info,image,"dpx:orientation.device");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.orientation.device,value,
      sizeof(dpx.orientation.device));
  offset+=WriteBlob(image,sizeof(dpx.orientation.device),(unsigned char *)
    dpx.orientation.device);
  offset+=WriteBlob(image,sizeof(dpx.orientation.serial),(unsigned char *)
    dpx.orientation.serial);
  /*
    Border and aspect ratio come in as geometry strings; sigma defaults to
    rho when only one value is given.
  */
  for (i=0; i < 4; i++)
    dpx.orientation.border[i]=0;
  value=GetDPXProperty(image_info,image,"dpx:orientation.border");
  if (value != (const char *) NULL)
    {
      flags=ParseGeometry(value,&geometry_info);
      if ((flags & SigmaValue) == 0)
        geometry_info.sigma=geometry_info.rho;
      dpx.orientation.border[0]=(unsigned short) (geometry_info.rho+0.5);
      dpx.orientation.border[1]=(unsigned short) (geometry_info.sigma+0.5);
      dpx.orientation.border[2]=(unsigned short) (geometry_info.xi+0.5);
      dpx.orientation.border[3]=(unsigned short) (geometry_info.psi+0.5);
    }
  for (i=0; i < 4; i++)
    offset+=WriteBlobShort(image,dpx.orientation.border[i]);
  for (i=0; i < 2; i++)
    dpx.orientation.aspect_ratio[i]=0U;
  value=GetDPXProperty(image_info,image,"dpx:orientation.aspect_ratio");
  if (value != (const char *) NULL)
    {
      flags=ParseGeometry(value,&geometry_info);
      if ((flags & SigmaValue) == 0)
        geometry_info.sigma=geometry_info.rho;
      dpx.orientation.aspect_ratio[0]=(unsigned int) (geometry_info.rho+0.5);
      dpx.orientation.aspect_ratio[1]=(unsigned int) (geometry_info.sigma+0.5);
    }
  for (i=0; i < 2; i++)
    offset+=WriteBlobLong(image,dpx.orientation.aspect_ratio[i]);
  offset+=WriteBlob(image,sizeof(dpx.orientation.reserve),(unsigned char *)
    dpx.orientation.reserve);
  /*
    Write film header.
  */
  *dpx.film.id='\0';
  value=GetDPXProperty(image_info,image,"dpx:film.id");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.id,value,sizeof(dpx.film.id));
  offset+=WriteBlob(image,sizeof(dpx.film.id),(unsigned char *) dpx.film.id);
  *dpx.film.type='\0';
  value=GetDPXProperty(image_info,image,"dpx:film.type");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.type,value,sizeof(dpx.film.type));
  offset+=WriteBlob(image,sizeof(dpx.film.type),(unsigned char *)
    dpx.film.type);
  *dpx.film.offset='\0';
  value=GetDPXProperty(image_info,image,"dpx:film.offset");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.offset,value,sizeof(dpx.film.offset));
  offset+=WriteBlob(image,sizeof(dpx.film.offset),(unsigned char *)
    dpx.film.offset);
  *dpx.film.prefix='\0';
  value=GetDPXProperty(image_info,image,"dpx:film.prefix");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.prefix,value,sizeof(dpx.film.prefix));
  offset+=WriteBlob(image,sizeof(dpx.film.prefix),(unsigned char *)
    dpx.film.prefix);
  *dpx.film.count='\0';
  value=GetDPXProperty(image_info,image,"dpx:film.count");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.count,value,sizeof(dpx.film.count));
  offset+=WriteBlob(image,sizeof(dpx.film.count),(unsigned char *)
    dpx.film.count);
  *dpx.film.format='\0';
  value=GetDPXProperty(image_info,image,"dpx:film.format");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.format,value,sizeof(dpx.film.format));
  offset+=WriteBlob(image,sizeof(dpx.film.format),(unsigned char *)
    dpx.film.format);
  dpx.film.frame_position=0U;
  value=GetDPXProperty(image_info,image,"dpx:film.frame_position");
  if (value != (const char *) NULL)
    dpx.film.frame_position=(unsigned int) atoi(value);
  offset+=WriteBlobLong(image,dpx.film.frame_position);
  dpx.film.sequence_extent=0U;
  value=GetDPXProperty(image_info,image,"dpx:film.sequence_extent");
  if (value != (const char *) NULL)
    dpx.film.sequence_extent=(unsigned int) atoi(value);
  offset+=WriteBlobLong(image,dpx.film.sequence_extent);
  dpx.film.held_count=0U;
  value=GetDPXProperty(image_info,image,"dpx:film.held_count");
  if (value != (const char *) NULL)
    dpx.film.held_count=(unsigned int) atoi(value);
  offset+=WriteBlobLong(image,dpx.film.held_count);
  dpx.film.frame_rate=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:film.frame_rate");
  if (value != (const char *) NULL)
    dpx.film.frame_rate=atof(value);
  offset+=WriteBlobFloat(image,dpx.film.frame_rate);
  dpx.film.shutter_angle=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:film.shutter_angle");
  if (value != (const char *) NULL)
    dpx.film.shutter_angle=atof(value);
  offset+=WriteBlobFloat(image,dpx.film.shutter_angle);
  *dpx.film.frame_id='\0';
  value=GetDPXProperty(image_info,image,"dpx:film.frame_id");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.frame_id,value,sizeof(dpx.film.frame_id));
  offset+=WriteBlob(image,sizeof(dpx.film.frame_id),(unsigned char *)
    dpx.film.frame_id);
  value=GetDPXProperty(image_info,image,"dpx:film.slate");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.film.slate,value,sizeof(dpx.film.slate));
  offset+=WriteBlob(image,sizeof(dpx.film.slate),(unsigned char *)
    dpx.film.slate);
  offset+=WriteBlob(image,sizeof(dpx.film.reserve),(unsigned char *)
    dpx.film.reserve);
  /*
    Write television header.
  */
  value=GetDPXProperty(image_info,image,"dpx:television.time.code");
  if (value != (const char *) NULL)
    dpx.television.time_code=DecodeSMPTEBits(value);
  offset+=WriteBlobLong(image,dpx.television.time_code);
  value=GetDPXProperty(image_info,image,"dpx:television.user.bits");
  if (value != (const char *) NULL)
    dpx.television.user_bits=DecodeSMPTEBits(value);
  offset+=WriteBlobLong(image,dpx.television.user_bits);
  value=GetDPXProperty(image_info,image,"dpx:television.interlace");
  if (value != (const char *) NULL)
    dpx.television.interlace=(unsigned char) atoi(value);
  offset+=WriteBlobByte(image,dpx.television.interlace);
  value=GetDPXProperty(image_info,image,"dpx:television.field_number");
  if (value != (const char *) NULL)
    dpx.television.field_number=(unsigned char) atoi(value);
  offset+=WriteBlobByte(image,dpx.television.field_number);
  dpx.television.video_signal=0;
  value=GetDPXProperty(image_info,image,"dpx:television.video_signal");
  if (value != (const char *) NULL)
    dpx.television.video_signal=(unsigned char) atoi(value);
  offset+=WriteBlobByte(image,dpx.television.video_signal);
  dpx.television.padding=0;
  value=GetDPXProperty(image_info,image,"dpx:television.padding");
  if (value != (const char *) NULL)
    dpx.television.padding=(unsigned char) atoi(value);
  offset+=WriteBlobByte(image,dpx.television.padding);
  dpx.television.horizontal_sample_rate=0.0f;
  value=GetDPXProperty(image_info,image,
    "dpx:television.horizontal_sample_rate");
  if (value != (const char *) NULL)
    dpx.television.horizontal_sample_rate=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.horizontal_sample_rate);
  dpx.television.vertical_sample_rate=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.vertical_sample_rate");
  if (value != (const char *) NULL)
    dpx.television.vertical_sample_rate=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.vertical_sample_rate);
  dpx.television.frame_rate=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.frame_rate");
  if (value != (const char *) NULL)
    dpx.television.frame_rate=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.frame_rate);
  dpx.television.time_offset=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.time_offset");
  if (value != (const char *) NULL)
    dpx.television.time_offset=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.time_offset);
  dpx.television.gamma=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.gamma");
  if (value != (const char *) NULL)
    dpx.television.gamma=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.gamma);
  dpx.television.black_level=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.black_level");
  if (value != (const char *) NULL)
    dpx.television.black_level=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.black_level);
  dpx.television.black_gain=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.black_gain");
  if (value != (const char *) NULL)
    dpx.television.black_gain=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.black_gain);
  dpx.television.break_point=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.break_point");
  if (value != (const char *) NULL)
    dpx.television.break_point=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.break_point);
  dpx.television.white_level=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.white_level");
  if (value != (const char *) NULL)
    dpx.television.white_level=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.white_level);
  dpx.television.integration_times=0.0f;
  value=GetDPXProperty(image_info,image,"dpx:television.integration_times");
  if (value != (const char *) NULL)
    dpx.television.integration_times=atof(value);
  offset+=WriteBlobFloat(image,dpx.television.integration_times);
  offset+=WriteBlob(image,sizeof(dpx.television.reserve),(unsigned char *)
    dpx.television.reserve);
  /*
    Write user header, then zero-pad up to the raster data offset.
  */
  value=GetDPXProperty(image_info,image,"dpx:user.id");
  if (value != (const char *) NULL)
    (void) strncpy(dpx.user.id,value,sizeof(dpx.user.id));
  offset+=WriteBlob(image,sizeof(dpx.user.id),(unsigned char *) dpx.user.id);
  if (profile != (StringInfo *) NULL)
    offset+=WriteBlob(image,GetStringInfoLength(profile),
      GetStringInfoDatum(profile));
  while (offset < (MagickOffsetType) dpx.image.image_element[0].data_offset)
    offset+=WriteBlobByte(image,0x00);
  /*
    Convert pixel packets to DPX raster image.
  */
  quantum_info=AcquireQuantumInfo(image_info,image);
  if (quantum_info == (QuantumInfo *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  /* Use the quantum setters, as the reader does, rather than poking the
     struct fields directly. */
  SetQuantumQuantum(quantum_info,32);
  SetQuantumPack(quantum_info,dpx.image.image_element[0].packing == 0 ?
    MagickTrue : MagickFalse);
  quantum_type=RGBQuantum;
  extent=GetBytesPerRow(image->columns,3,image->depth,MagickTrue);
  /*
    NOTE(review): this grayscale branch is disabled (`if (0)`); presumably it
    should test for a grayscale image (e.g. IsGrayImage) -- confirm against
    upstream before enabling.  GrayQuantum has 1 sample per pixel, not 3.
  */
  if (0)
    {
      quantum_type=GrayQuantum;
      extent=GetBytesPerRow(image->columns,1,image->depth,MagickTrue);
    }
  pixels=GetQuantumPixels(quantum_info);
  for (y=0; y < (long) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    (void) ExportQuantumPixels(image,(const ViewInfo *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    count=WriteBlob(image,extent,pixels);
    if (count != (ssize_t) extent)
      break;
    status=SetImageProgress(image,SaveImageTag,y,image->rows);
    if (status == MagickFalse)
      break;
  }
  quantum_info=DestroyQuantumInfo(quantum_info);
  (void) CloseBlob(image);
  return(status);
}
digest.c
/* * Copyright 2010 Red Hat Inc., Durham, North Carolina. * All Rights Reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Authors: * "Daniel Kopecek" <dkopecek@redhat.com> */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdarg.h> #include <stddef.h> #include <stdint.h> #include <unistd.h> #include <assume.h> #include <errno.h> #include "crapi.h" #include "digest.h" #include "md5.h" #include "sha1.h" #include "sha2.h" #include "rmd160.h" int crapi_digest_fd (int fd, crapi_alg_t alg, void *dst, size_t *size) { assume_r(dst != NULL, -1, errno = EFAULT;); assume_r(size != NULL, -1, errno = EFAULT;); switch (alg) { case CRAPI_DIGEST_MD5: return crapi_md5_fd (fd, dst, size); case CRAPI_DIGEST_SHA1: return crapi_sha1_fd (fd, dst, size); case CRAPI_DIGEST_SHA224: return crapi_sha224_fd (fd, dst, size); case CRAPI_DIGEST_SHA256: return crapi_sha256_fd (fd, dst, size); case CRAPI_DIGEST_SHA384: return crapi_sha384_fd (fd, dst, size); case CRAPI_DIGEST_SHA512: return crapi_sha512_fd (fd, dst, size); case CRAPI_DIGEST_RMD160: return crapi_rmd160_fd (fd, dst, size); } errno = EINVAL; return (-1); } int crapi_mdigest_fd (int fd, int num, ... 
/* crapi_alg_t alg, void *dst, size_t *size, ...*/) { register int i; va_list ap; struct digest_ctbl_t ctbl[num]; crapi_alg_t alg; void *dst; size_t *size; uint8_t fd_buf[CRAPI_IO_BUFSZ]; ssize_t ret; assume_r (num > 0, -1, errno = EINVAL;); assume_r (fd > 0, -1, errno = EINVAL;); for (i = 0; i < num; ++i) ctbl[i].ctx = NULL; va_start (ap, num); for (i = 0; i < num; ++i) { alg = va_arg (ap, crapi_alg_t); dst = va_arg (ap, void *); size = va_arg (ap, size_t *); switch (alg) { case CRAPI_DIGEST_MD5: ctbl[i].init = &crapi_md5_init; ctbl[i].update = &crapi_md5_update; ctbl[i].fini = &crapi_md5_fini; ctbl[i].free = &crapi_md5_free; break; case CRAPI_DIGEST_SHA1: ctbl[i].init = &crapi_sha1_init; ctbl[i].update = &crapi_sha1_update; ctbl[i].fini = &crapi_sha1_fini; ctbl[i].free = &crapi_sha1_free; break; case CRAPI_DIGEST_SHA224: ctbl[i].init = &crapi_sha224_init; ctbl[i].update = &crapi_sha224_update; ctbl[i].fini = &crapi_sha224_fini; ctbl[i].free = &crapi_sha224_free; break; case CRAPI_DIGEST_SHA256: ctbl[i].init = &crapi_sha256_init; ctbl[i].update = &crapi_sha256_update; ctbl[i].fini = &crapi_sha256_fini; ctbl[i].free = &crapi_sha256_free; break; case CRAPI_DIGEST_SHA384: ctbl[i].init = &crapi_sha384_init; ctbl[i].update = &crapi_sha384_update; ctbl[i].fini = &crapi_sha384_fini; ctbl[i].free = &crapi_sha384_free; break; case CRAPI_DIGEST_SHA512: ctbl[i].init = &crapi_sha512_init; ctbl[i].update = &crapi_sha512_update; ctbl[i].fini = &crapi_sha512_fini; ctbl[i].free = &crapi_sha512_free; break; case CRAPI_DIGEST_RMD160: ctbl[i].init = &crapi_rmd160_init; ctbl[i].update = &crapi_rmd160_update; ctbl[i].fini = &crapi_rmd160_fini; ctbl[i].free = &crapi_rmd160_free; break; default: va_end (ap); goto fail; } if ((ctbl[i].ctx = ctbl[i].init (dst, size)) == NULL) *size = 0; } va_end (ap); while ((ret = read (fd, fd_buf, sizeof fd_buf)) == sizeof fd_buf) { #pragma omp parallel for for (i = 0; i < num; ++i) { if (ctbl[i].ctx == NULL) continue; if (ctbl[i].update (ctbl[i].ctx, 
fd_buf, sizeof fd_buf) != 0) { goto fail; } } } switch (ret) { case 0: break; case -1: goto fail; default: assume_r (ret > 0, -1, goto fail;); for (i = 0; i < num; ++i) { if (ctbl[i].ctx == NULL) continue; if (ctbl[i].update (ctbl[i].ctx, fd_buf, (size_t)ret) != 0) { goto fail; } } } for (i = 0; i < num; ++i) { if (ctbl[i].ctx == NULL) continue; ctbl[i].fini (ctbl[i].ctx); } return (0); fail: for (i = 0; i < num; ++i) if (ctbl[i].ctx != NULL) ctbl[i].free (ctbl[i].ctx); return (-1); }
GB_binop__rminus_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_int64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_int64) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int64) // A*D function (colscale): GB (_AxD__rminus_int64) // D*A function (rowscale): GB (_DxB__rminus_int64) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_int64) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_int64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int64) // C=scalar+B GB (_bind1st__rminus_int64) // C=scalar+B' GB (_bind1st_tran__rminus_int64) // C=A+scalar GB (_bind2nd__rminus_int64) // C=A'+scalar GB (_bind2nd_tran__rminus_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B 
are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_INT64 || GxB_NO_RMINUS_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): auto-generated kernel set for the RMINUS (z = y - x) operator on
// int64_t (see the GB_BINOP macro defined above).  Function bodies come from the
// #included template files; only comments are added here — do not hand-edit logic.
void GB (_Cdense_ewise3_accum__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // All three matrices are dense; the template applies C += A+B in place.
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // This kernel was compiled out (see GB_DISABLE above); caller falls back
    // to the generic case.
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns (generated code).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Workspace slicings are declared here and freed via GB_FREE_WORK below.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__rminus_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for rminus: the flip of rminus is minus.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__rminus_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        // rminus with x bound first: z = y - x
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__rminus_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        // rminus with y bound second: z = y - x
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int64_t aij = Ax [pA] ;         \
    Cx [pC] = (aij - x) ;           \
}

GrB_Info GB (_bind1st_tran__rminus_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int64_t aij = Ax [pA] ;         \
    Cx [pC] = (y - aij) ;           \
}

GrB_Info GB (_bind2nd_tran__rminus_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
LB_D1Q3_2-components.c
/** * @file LB_D1Q3_2-components.c * @author Kent S. Ridl * @date 2 December 2017 * * The module _LB_D1Q3_2-components.c_ contains the code to run a lattice Boltzmann simulation. It also contains the definitions of all externs from the header * file _LB_D1Q3_2-components.h_. */ #include "LB_D1Q3_2-components.h" // // Define the externs declared in the .h file // // Directory to read/write log files and scripts char *dataDirectory = ""; // Domain and phase information double theoreticalDensityA1 = 0.0; // theoretical densities and volumina/interface widths double theoreticalDensityA2 = 0.0; double theoreticalDensityA3 = 0.0; double theoreticalDensityB1 = 0.0; double theoreticalDensityB2 = 0.0; double theoreticalDensityB3 = 0.0; double theoreticalVolume1 = 0.0; double theoreticalVolume2 = 0.0; double theoreticalVolume3 = 0.0; double theoreticalPressure = 0; double theoreticalMuA = 0; double theoreticalMuB = 0; double interfaceWidth = XDIM / 20; // default to 5% of the lattice size double interfaceWidthForGivenKappa = XDIM / 20; int theoreticalPhases = 0; int initializeRandomComponents = 0; // default both components when random init int domain1 = XDIM / 4; // locations and widths of 4 possible domains int domain1Width = XDIM / 8; int domain2 = XDIM / 2; int domain2Width = XDIM / 8; int domain3 = 3 * XDIM / 4; int domain3Width = XDIM / 8; int domain4 = XDIM; int domain4Width = XDIM / 8; int phase1Index = 0; int phase2Index = 0; // Physical properties common to minimization and LB simulations double nA0 = 1.0; double nB0 = 1.0; double nAIntegrated = 0.0; // track total mass/densities double nBIntegrated = 0.0; double theta = 1./3.; double aA = 0.1; // VDW constants for each component, interaction double aB = 0.1; double aAB = 0.1; double bA = 1./3.; double bB = 1./3.; double vdwInteractionFactor = 0.5; // nu in paper double Amp=0.01; double tcA = 0.4; // critical point traits double ncA = 1.0; double pcA = 1.0; double tcB = 0.4; double ncB = 1.0; double pcB = 1.0; 
double oneOverTau = 1; // 1/tau from write-up (relaxation constant) double tau = 1; double g = 0; // gravitational acceleration term double lambda = 1.0; // friction (F12) coefficient // Free energy minimization control double minimizationParticlesStepSize = 0.01; double minimizationStepFactor = 0.5; //0.1; double invalidFreeEnergy = 1000.0; int sortABinodalByIncreasingB = 1; int sortBBinodalByIncreasingA = 1; int threePhaseRegionExists = 0; // assume default is no 3-phase region exist // 3-phase region properties and control double threePhaseRegion[6] = {0, 0, 0, 0, 0, 0}; // default to no 3-phase region in a phase diagram double rhoAThreePhaseRegion = 0.0; double rhoBThreePhaseRegion = 0.0; double maxArea = 0.0; double metastableThreshold = 0.01; //0.009; double metastableThresholdFactor = 20.0; int setThreePhaseRegion = 1; // need to set once program initialized int setLineAPoint = 1; // divide 3-phase and binary liquid regions int setLineBPoint = 1; // LB simulation initialization and runtime control double kappa = 0.1; // interfacial free energy double kappaFactor = 1.0; double gammaP = 1; // pressure coefficient for forcing rate double gammaMu = 0.1; // chemical potential coefficient for forcing rate double gammaFactor = 1.0; double endGammaFactor = 1000.0; int useTwoOrThreePhaseInitialization = 2; // default to 2-phase step profile int useStepOrRandomInitialization = 1; // default to step/tanh profile int useTheoreticalDensities = 1; // default to read in theory minimization densities int useTheoreticalVolumina = 0; // default to equal volumina for each domain int useTheoreticalPhase1 = 1; int useTheoreticalPhase2 = 1; int useTheoreticalPhase3 = 0; int suppressTheoreticalPhase1 = 0; int suppressTheoreticalPhase2 = 0; int isolateFourthPhase = 0; int overrideMinimumInterfaceWidth = 0; int goodInterfaceFit = 1; int fitInterfaceWidthToKappa = 0; int determineInterfaceWidthAutomatically = 1; int determineKappaAutomatically = 1; int kappaFactorDetermined = 0; int 
useChemicalPotentialForcingMethod = 2; int useBoundaryConditionsPeriodic = 1; // default periodic BCs int usePressurePartitionFunction = 1; // default to pressure derived from PF (not from construction) int useMuVdwInteraction = 1; int setPhaseDiagramTwoPhaseRegion = 1; int setPhaseDiagramThreePhaseRegion = 0; int checkTwoPhaseMetastableLBPoints = 1; // sub-choices for simulating 3-phase region int checkThreePhaseLBPoints = 1; int applyMomentumCorrection = 0; // For search tool to find parent from either minimization or LB sim double childRhoA = 1.0; double childRhoB = 1.0; // To specify a single test point to minimize (instead of a whole landscape of test points) double singlePointRhoA = 0.4; //1.0; double singlePointRhoB = 0.4; //1.0; // GUI control flags int next=0; int Pause=1; int run = 0; int done=0; int Repeat=100; int iterations; int tmp_phase_iterations = 0; int phase_iterations = 50000; void (*collision)(); ///< function pointer to choose the collision method for a LB simulation /** * @brief Function _calculateMass_ calculates the mass each cell over the lattice. It also performs a numerical integration of the mass over the lattice to help verify * the average densities of each component stay constant throughout a simulation. 
 */
void calculateMass() {
    nAIntegrated = 0;
    nBIntegrated = 0;

    // Sweep across the lattice to conserve mass and determine the pressures at each cell.
    // Each lattice site is independent; the reduction accumulates the two totals safely.
    #pragma omp parallel for reduction(+:nAIntegrated,nBIntegrated)
    for (int i = 0; i < XDIM; i++) {
        n1[i] = f1_0[i] + f1_1[i] + f1_2[i]; // 1st component mass density for this step
        n2[i] = f2_0[i] + f2_1[i] + f2_2[i]; // 2nd component mass density for this step
        n[i] = n1[i] + n2[i]; // conservation of particles

        nAIntegrated += n1[i];
        nBIntegrated += n2[i];

        // reduced density n1*n2/n (used by the friction force); guard against empty cells
        if (n[i] != 0) nReduced[i] = n1[i]*n2[i] / n[i];
        else nReduced[i] = 0;
    } // end for

    // convert totals into per-site (average) densities
    nAIntegrated /= XDIM;
    nBIntegrated /= XDIM;
} // end function calculateMass()

/**
 * @brief Function _calculateVelocities_ calculates the mixture's mean velocity (a hydrodynamic variable) and the velocity for each component (non-hydrodynamic variables)
 * for each cell over the lattice.
 */
void calculateVelocities() {
    #pragma omp parallel for
    for (int i = 0; i < XDIM; i++) {
        // per-component velocities: first moment of the distributions divided by density
        if (n1[i] != 0) u1[i] = (f1_1[i]-f1_2[i]) / n1[i];
        else u1[i] = 0.0;
        if (n2[i] != 0) u2[i] = (f2_1[i]-f2_2[i]) / n2[i];
        else u2[i] = 0.0;

        if (n[i] != 0) u[i] = (n1[i]*u1[i] + n2[i]*u2[i]) / n[i]; // bulk average velocity
        else u[i] = 0;
    } // end for
} // end function calculateVelocities()

/**
 * @brief Function _correctVelocities_ applies the (0.5/rho*Force) correction factor to each component's velocity.  Only the thermodynamic force from a
 * chemical potential gradient is used to correct the velocities.
 */
void correctVelocities() {
    #pragma omp parallel for
    for (int i = 0; i < XDIM; i++) {
        // half-force (trapezoidal) velocity correction; uses gradMuForce only, not the full F
        if (n1[i] != 0) uHat1[i] = u1[i] + 0.5/n1[i]*gradMuForce1[i]; //*F1[i]; // velocity correction needed for the forcing methods
        else uHat1[i] = 0.0;
        if (n2[i] != 0) uHat2[i] = u2[i] + 0.5/n2[i]*gradMuForce2[i]; //*F2[i]; // velocity correction needed for the forcing methods
        else uHat2[i] = 0.0;
    } // end for
} // end function correctVelocities()

/**
 * @brief Function _correctExcessMomentum_ calculates a force per particle correction term.  The correction is applied to each lattice cell by
 * weighting the correction according to the density in that cell and subtracting from the actual force for each component applied to the cell.
 * This adjustment helps to correct for velocity errors that arise due to the discrete nature of the gradients used in force calculations.
 *
 * @note The global flag _applyMomentumCorrection_ controls whether or not this correction is applied. (default is off)
 */
void correctExcessMomentum() {
    double totalParticles = 0.0, totalForce = 0.0;
    double correction = 0.0;

    // accumulate total mass and total force over the whole lattice
    #pragma omp parallel for reduction(+:totalParticles,totalForce)
    for (int i = 0; i < XDIM; i++) {
        totalParticles += n1[i] + n2[i];
        totalForce += F1[i] + F2[i];
    }
    // force per particle; NOTE(review): divides by zero if the lattice is empty — assumed nonzero mass
    correction = totalForce / totalParticles;

    // subtract the density-weighted share of the net force from each component
    #pragma omp parallel for
    for (int i = 0; i < XDIM; i++) {
        F1[i] -= n1[i] * correction;
        F2[i] -= n2[i] * correction;
    }

//	static double totalF1 = 0.0, totalF2 = 0.0;
//
//	// Total force per lattice site is correction applied
//	#pragma omp parallel for
//	for (int i = 0; i < XDIM; i++) {
//		F1[i] -= totalF1 / XDIM; //F1correction;
//		F2[i] -= totalF2 / XDIM; //F2correction;
//	}
//
//	// Sum up new total forces for next iteration's correction application
//	totalF1 = 0;
//	totalF2 = 0;
//	#pragma omp parallel for reduction(+:totalF1,totalF2)
//	for (int i = 0; i < XDIM; i++) {
//		totalF1 += F1[i];
//		totalF2 += F2[i];
//	}
} // end function correctExcessMomentum()

/**
 * @brief Function _calculateLBPressure_ calculates pressure for the full 2-component mixture for each cell over the lattice.
 *
 * The pressure of the mixture is calculated in this function.  The pressure includes gradient terms, and the components are coupled together and cannot be
 * separated into meaningful partial pressures.
 *
 * @note The global parameter _gammaP_ is a "filter" to modulate the pressure that is applied each time step and aid
 * in stabilizing the simulation.
 * @note The global parameter _usePressurePartitionFunction_ allows the user to select from two pressure formulations: one is a constructed pressure tensor
 * and one is derived from a partition.  This feature was used in development and is preserved for "gee whiz" purposes.
 */
void calculateLBPressure() {
    #pragma omp parallel for
    for (int i = 0; i < XDIM; i++) {
        // Bulk (van der Waals) part of the pressure; two algebraically different forms
        if (usePressurePartitionFunction) {
            pressure[i] = n[i]*theta*(1 + (bA*n1[i]+bB*n2[i])/(1.-bA*n1[i]-bB*n2[i]))
                    - aA*n1[i]*n1[i] - 2*aAB*n1[i]*n2[i] - aB*n2[i]*n2[i];
        }
        else {
            pressure[i] = n1[i]*theta/(1.-bA*n1[i]-bB*n2[i]) + n2[i]*theta/(1.-bA*n1[i]-bB*n2[i])
                    - aA*n1[i]*n1[i] - 2*aAB*n1[i]*n2[i] - aB*n2[i]*n2[i];
        }

        // Gradient corrections for each single component, including self-interactions
        pressure[i] += -kappa*( n1[i]*laplace(n1,i) + 0.5*gradient(n1,i)*gradient(n1,i)
                + n2[i]*laplace(n2,i) + 0.5*gradient(n2,i)*gradient(n2,i) );
        pressure[i] += kappa*( gradient(n1,i)*gradient(n1,i) + gradient(n2,i)*gradient(n2,i) );

        // Gradient corrections for the cross terms (i.e. cross interactions)
        pressure[i] += -kappa*( n1[i]*laplace(n2,i) + n2[i]*laplace(n1,i) + gradient(n1,i)*gradient(n2,i) );
        pressure[i] += kappa*( 2.*gradient(n1,i)*gradient(n2,i) );

        // modulate by the stabilizing "filter" coefficient
        pressure[i] *= gammaP;
    }
} // end function calculateLBPressure()

/**
 * @brief Function _calculateLBChemicalPotentials_ calculates the chemical potential for each component for each cell over the lattice.
 *
 * This function calculates the chemical potentials including gradient terms that are the basis for the LB forcing terms.  It calculates both the full
 * chemical potentials and the non-ideal parts of the chemical potentials.
 */
void calculateLBChemicalPotentials() {
    #pragma omp parallel for
    for (int i = 0; i < XDIM; i++) {
        // Full chemical potentials (scaled by gammaMu); the ternary selects whether the
        // cross-gradient term is weighted by the VDW interaction factor (nu) or used at full strength.
        mu1[i] = gammaMu * ( theta*log(n1[i]/(1.-bA*n1[i]-bB*n2[i]))
                + theta*bA*(n1[i]+n2[i])/(1.-bA*n1[i]-bB*n2[i])
                - 2*aA*n1[i] - 2*aAB*n2[i]
                - kappa*laplace(n1,i)
                - (useMuVdwInteraction ? vdwInteractionFactor : 1)*kappa*laplace(n2,i) );
        mu2[i] = gammaMu * ( theta*log(n2[i]/(1.-bA*n1[i]-bB*n2[i]))
                + theta*bB*(n1[i]+n2[i])/(1.-bA*n1[i]-bB*n2[i])
                - 2*aB*n2[i] - 2*aAB*n1[i]
                - kappa*laplace(n2,i)
                - (useMuVdwInteraction ? vdwInteractionFactor : 1)*kappa*laplace(n1,i) );

        // Non-ideal parts: full mu minus the ideal-gas contribution theta*log(n)
        muNonIdeal1[i] = mu1[i] - theta*log(n1[i]);
        muNonIdeal2[i] = mu2[i] - theta*log(n2[i]);
    }
} // end function calculateLBChemicalPotentials()

/**
 * @brief Function _calculateFriction_ calculates the average momentum transfer (friction force) between components A and B.  It then adds the
 * friction force to the conservative force from a chemical potential gradient to give the total force for each lattice cell.
 */
void calculateFriction() {
    #pragma omp parallel for
    for (int i = 0; i < XDIM; i++) {
        // equal and opposite drag, proportional to the velocity difference and reduced density
        friction1[i] = nReduced[i] * (uHat1[i]-uHat2[i]); // friction forces on 1st component
        friction2[i] = nReduced[i] * (uHat2[i]-uHat1[i]); // friction forces on 2nd component

        // Final forcing for each component including friction
        F1[i] = gradMuForce1[i] - lambda*friction1[i];
        F2[i] = gradMuForce2[i] - lambda*friction2[i];
    } // end for
} // end function calculateFriction()

/**
 * @brief Function _collisionForcingNewChemicalPotentialGradient_ uses a chemical potential gradient forcing method for the lattice Boltzmann collision step.
 *
 * This function uses the chemical potentials of each component as the basis of the forcing in the LB collision step of the algorithm.  There are 2 chemical
 * potential gradient formulations:
 * 1. The "nid" formulation is a gradient of the non-ideal portion of the chemical potential.
 * 2. The "log" formulation is a gradient of the full chemical potential minus the gradient of an ideal pressure.
 * The global parameter _useChemicalPotentialForcingMethod_ is used to choose the formulation (1=nid, 2=log).
 * This function also applies a 4th order force correction to help insure thermodynamic consistency of the LB simulation (see reference).
 *
 * @see A. J. Wagner, Phys. Rev.
E 74, 056703 (2006).
 */
void collisionForcingNewChemicalPotentialGradient() {
    static int lastMuForcingMethod = 0; // default to the first case below

    iterations++;

    // refresh the macroscopic fields before forcing: densities, velocities, pressure, mu
    calculateMass();
    calculateVelocities();
    calculateLBPressure();
    calculateLBChemicalPotentials();

    //
    // Forcing derived from chemical potential gradients... a la Gibbs-Duhem (sum over components of rho*gradMu equals pressure gradient)
    //

    if (useChemicalPotentialForcingMethod != lastMuForcingMethod) { // print out a statement when the forcing method changes
        lastMuForcingMethod = useChemicalPotentialForcingMethod;
        switch (useChemicalPotentialForcingMethod) {
        case 1: // grad non-ideal mu
            printf("\"nid\" grad-Mu forcing: -nx*grad(MuNidx)\n");
            break;
        case 2: // gradient of mu minus ideal pressure
            printf("\"log\" grad-Mu forcing: -nx*grad(Mux)-theta*grad(nx)\n");
            break;
        default:
            // invalid selection: fall back to the "log" formulation
            useChemicalPotentialForcingMethod = 2;
            lastMuForcingMethod = 2;
            printf("Invalid Selection!  Defaulting to \"log\" grad-Mu forcing method 2: -nx*grad(Mux)-theta*grad(nx)\n");
        } // end switch
    } // end if

    switch (useChemicalPotentialForcingMethod) {
    case 1: // grad non-ideal mu (nid)
        #pragma omp parallel for
        for (int i = 0; i < XDIM; i++) {
            gradMuForce1[i] = -1.*n1[i]*gradient(muNonIdeal1,i) + n1[i]*g;
            gradMuForce2[i] = -1.*n2[i]*gradient(muNonIdeal2,i) + n2[i]*g;
        }
        break;
    case 2: // gradient of mu minus ideal pressure/chemical potential gradient (log)
        #pragma omp parallel for
        for (int i = 0; i < XDIM; i++) {
            gradMuForce1[i] = -1. * ( n1[i]*gradient(mu1,i)-theta*gradient(n1,i) ) + n1[i]*g;
            gradMuForce2[i] = -1. * ( n2[i]*gradient(mu2,i)-theta*gradient(n2,i) ) + n2[i]*g;
        }
        break;
    //default: // do nothing...
    } // end switch

    correctVelocities(); // velocities are corrected by the conservative rho*gradMu force only (friction not included)
    calculateFriction();
    if (applyMomentumCorrection) correctExcessMomentum();

    #pragma omp parallel for
    for (int i = 0; i < XDIM; i++) {
        // Correction to the equilibrium distribution that alters the actual forcing
        if (n1[i] !=0) {
            psi1[i] = -oneOverTau * ( (tau-0.25)*F1[i]*F1[i]/n1[i] + (1./12.)*laplace(n1,i) ); // subtract psi, so minus sign relative to paper
        }
        else psi1[i] = 0;

        // Calculate particle densities at current lattice spot with forcing included
        f1_0[i] += oneOverTau * ( (n1[i] - n1[i]*theta - n1[i]*u1[i]*u1[i]) - f1_0[i] ) - ( 2.*F1[i]*u1[i] - psi1[i] );
        f1_1[i] += oneOverTau * ( 0.5*(n1[i]*u1[i]*u1[i]+n1[i]*u1[i]+n1[i]*theta)-f1_1[i] ) - ( -F1[i]*u1[i] - 0.5*F1[i] + 0.5*psi1[i] );
        f1_2[i] += oneOverTau * ( 0.5*(n1[i]*u1[i]*u1[i]-n1[i]*u1[i]+n1[i]*theta)-f1_2[i] ) - ( -F1[i]*u1[i] + 0.5*F1[i] + 0.5*psi1[i] );

        // Correction to the equilibrium distribution that alters the actual PGF to pressure is constant in equilibirum
        if (n2[i] != 0) {
            psi2[i] = -oneOverTau * ( (tau-0.25)*F2[i]*F2[i]/n2[i] + (1./12.)*laplace(n2,i) ); // subtract psi, so minus sign relative to paper
        }
        else psi2[i] = 0;

        f2_0[i] += oneOverTau * ( (n2[i] - n2[i]*theta - n2[i]*u2[i]*u2[i]) - f2_0[i] ) - ( 2.*F2[i]*u2[i] - psi2[i] );
        f2_1[i] += oneOverTau * ( 0.5*(n2[i]*u2[i]*u2[i] + n2[i]*u2[i] + n2[i]*theta) - f2_1[i] ) - ( -F2[i]*u2[i] - 0.5*F2[i] + 0.5*psi2[i] );
        f2_2[i] += oneOverTau * ( 0.5*(n2[i]*u2[i]*u2[i] - n2[i]*u2[i] + n2[i]*theta) - f2_2[i] ) - ( -F2[i]*u2[i] + 0.5*F2[i] + 0.5*psi2[i] );
    } // end for
} // end function collisionForcingNewChemicalPotentialGradient()

/**
 * @brief Function _setCollisionForcingNewChemicalPotentialGradient_ sets the forcing method used by the function _collision_ to be the gradient of a
 * chemical potential.
 */
void setCollisionForcingNewChemicalPotentialGradient() {
    collision = collisionForcingNewChemicalPotentialGradient;
    printf("Using gradMu forcing - new...\n");
} // end function setCollisionForcingNewChemicalPotentialGradient()

/**
 * @brief Function _streaming_ is the streaming step of the lattice Boltzmann algorithm.
 *
 * The streaming step of the LB algorithm shifts the particle distributions for each component to "move" the particles.  This function is specific to the
 * 1-D lattice and defaults to use periodic boundary conditions.  If desired, the global flag _useBoundaryConditionsPeriodic_ may be toggled to change the
 * ends of the lattice to impose solid end points with bounce-back boundaries.
 */
void streaming() {
    double tmp;

    /* Original wrap-around end points (memmove handles the overlapping shift safely) */
    tmp=f1_1[XDIM-1]; // save right end point
    memmove(&f1_1[1],&f1_1[0],(XDIM-1)*sizeof(double)); // shift all cells +1
    f1_1[0]=tmp; // rotate former end to first lattice cell
    tmp=f1_2[0]; // save left end point
    memmove(&f1_2[0],&f1_2[1],(XDIM-1)*sizeof(double)); // shift all cells -1
    f1_2[XDIM-1]=tmp; // rotate former first lattice cell to end

    tmp=f2_1[XDIM-1]; // save right end point
    memmove(&f2_1[1],&f2_1[0],(XDIM-1)*sizeof(double)); // shift all cells +1
    f2_1[0]=tmp; // rotate former end to first lattice cell
    tmp=f2_2[0]; // save left end point
    memmove(&f2_2[0],&f2_2[1],(XDIM-1)*sizeof(double)); // shift all cells -1
    f2_2[XDIM-1]=tmp; // rotate former first lattice cell to end

    // Walls at the end points; bounce from lattice origin (0)
    // (undo the wrap-around by swapping the two rotated end values)
    if (!useBoundaryConditionsPeriodic) {
        tmp = f1_1[0];
        f1_1[0] = f1_2[XDIM-1];
        f1_2[XDIM-1]=tmp;
        tmp = f2_1[0];
        f2_1[0] = f2_2[XDIM-1];
        f2_2[XDIM-1]=tmp;
    }
} // end function streaming()

/**
 * @brief Function _iteration_ is the governing function for each iteration of the lattice Boltzmann algorithm, executing the collision and streaming steps
 * in succession.
 */
void iteration(){
    // Need to reset the critical and VDW constants each iteration
    // Keeps them all in sync if one is changed during a simulation
    pcA = 3.*tcA/8.; // determine pcA
    ncA = pcA / ((3./8.)*tcA); // fix ncA to be 1
    pcB = (3./8.)*tcB*ncB; // determine pcB
    aA = (27./64.)*(tcA*tcA/pcA);
    aB = (27./64.)*(tcB*tcB/pcB);
    aAB = sqrt(aA*aB) * vdwInteractionFactor;
    bA = tcA/(8.*pcA);
    bB = tcB/(8.*pcB);

    // one LB step: collide (via the configured function pointer), then stream
    collision();
    streaming();

    // When running manual simulations, automatically stop the simulation if this iteration blows up
    if ((!Pause || run) && (!stableSimulation(n1) || !stableSimulation(n2))) {
        Pause = 1;
        run = 0;
    }
} // end function iteration()

//void calculateFreeEnergyLattice(){
//	int i = 0;
//	double excludedVolume = 0;
//
//	// Sum over the lattice to determine total free energy given free energy densities at each lattice site
////	#pragma omp parallel for private(excludedVolume)
//	for (i = 0; i < XDIM; i++) {
//		excludedVolume = n1[i]*bA + n2[i]*bB;
//		if ((n1[i] < 0) || (n2[i] < 0) || (excludedVolume > volumeTotal)) {
//			freeEnergy[i] = invalidFreeEnergy;
//		}
//		else if ((n1[i] == 0) && (n2[i] == 0)) {
//			freeEnergy[i] = 0;
//		}
//		else if (n1[i] == 0) {
//			freeEnergy[i] = n2[i]*theta*log(n2[i]/(volumeTotal-excludedVolume)) - aB*n2[i]*n2[i]/volumeTotal - theta*n2[i];
//		}
//		else if (n2[i] == 0) {
//			freeEnergy[i] = n1[i]*theta*log(n1[i]/(volumeTotal-excludedVolume)) - aA*n1[i]*n1[i]/volumeTotal - theta*n1[i];
//		}
//		else {
//			freeEnergy[i] = n1[i]*theta*log(n1[i]/(volumeTotal-excludedVolume)) + n2[i]*theta*log(n2[i]/(volumeTotal-excludedVolume))
//					- aA*n1[i]*n1[i]/volumeTotal - 2*aAB*n2[i]*n1[i]/volumeTotal - aB*n2[i]*n2[i]/volumeTotal - theta*(n1[i]+n2[i]);
//		}
//	} // end for
//} // end function calculateFreeEnergy()
//
//
//void correctPressure() {
//	int i = 0;
//
//	for (i = 0; i < XDIM; i++) {
//		pressureCorrection[i] = -(tau-0.25)*(F1[i]*F1[i]+F2[i]*F2[i])/n[i] + 0.25*(n1[i]*gradient(F1,i)+n2[i]*gradient(F2,i))/n[i] - (1./12.)*(n1[i]*laplace(n1,i)+n2[i]*laplace(n2,i))/n[i]; // -(tau-0.25)*(F1[i]*F1[i]+F2[i]*F2[i])/n[i] + 0.25*(n1[i]*F1[i]*F1[i]+n2[i]*F2[i]*F2[i])/n[i]
//		correctedPressure[i] = pressure[i] - pressureCorrection[i];
//	}
//}
symm_x_coo_n_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <alphasparse.h>

/* Bytes per cache line: per-thread column blocks are sized in multiples of
   this so that no two threads ever write into the same cache line of y. */
#define CACHELINE 64

/*
 * y := alpha * A * X + beta * y, where A is symmetric with only the upper
 * triangle stored (hi) in COO format, X and y are dense row-major
 * multi-vectors with leading dimensions ldx / ldy and `columns` columns.
 *
 * Parallelization: entries of A are scanned by every thread, but each thread
 * only updates its own contiguous block of dense columns [bcl, bch), so no
 * two threads write the same element of y and no atomics are needed.
 *
 * Returns ALPHA_SPARSE_STATUS_SUCCESS.
 *
 * Fixes vs. previous revision:
 *  - removed unused locals `m` and `n` (dead code);
 *  - clamp block_num to at least 1 so num_threads can never become 0
 *    (OpenMP requires a positive num_threads clause argument).
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* First pass: y = beta * y over the whole rows-by-columns output. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < mat->rows; i++)
        for (ALPHA_INT j = 0; j < columns; j++)
        {
            alpha_mul(y[i * ldy + j], y[i * ldy + j], beta);
        }

    /* Partition the dense columns into cache-line-sized blocks; never use
       more threads than blocks.  Keep at least one block so the
       num_threads clause below stays positive even when columns == 0. */
    ALPHA_INT block_size = CACHELINE / sizeof(ALPHA_Number);
    ALPHA_INT block_num = (columns + block_size - 1) / block_size;
    if (block_num < 1)
        block_num = 1;
    if (num_threads > block_num)
        num_threads = block_num;

#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        /* This thread owns dense columns [bcl, bch). */
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid, num_threads, block_num) * block_size;
        ALPHA_INT bch = cross_block_high(tid, num_threads, block_num) * block_size;
        if (bch > columns)
            bch = columns;

        for (ALPHA_INT ai = 0; ai < mat->nnz; ai++)
        {
            ALPHA_INT ac = mat->col_indx[ai];
            ALPHA_INT r = mat->row_indx[ai];
            if (ac > r)
            {
                /* Strictly-upper entry A(r,ac): apply it and its mirrored
                   transpose A(ac,r) to the owned column range. */
                ALPHA_Number val;
                alpha_mul(val, alpha, mat->values[ai]);
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
                for (ALPHA_INT c = bcl; c < bch; ++c)
                    alpha_madde(y[index2(ac, c, ldy)], val, x[index2(r, c, ldx)]);
            }
            else if (ac == r)
            {
                /* Diagonal entry: applied once.  Strictly-lower entries
                   (ac < r) are ignored — only the upper triangle is used. */
                ALPHA_Number val;
                alpha_mul(val, alpha, mat->values[ai]);
                for (ALPHA_INT c = bcl; c < bch; ++c)
                {
                    alpha_madde(y[index2(r, c, ldy)], val, x[index2(ac, c, ldx)]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
GB_unop__identity_int8_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int8_int64
// op(A') function:  GB_unop_tran__identity_int8_int64

// C type:   int8_t
// A type:   int64_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (truncating int64_t -> int8_t conversion)
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise copy-with-cast of anz entries from Ax into Cx, parallelized
// with a static OpenMP schedule (each entry is independent).
GrB_Info GB_unop_apply__identity_int8_int64
(
    int8_t *Cx,             // Cx and Ax may be aliased
    const int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;
        int8_t z = (int8_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body lives in the GB_unop_transpose.c template, which expands using the
// GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above.
GrB_Info GB_unop_tran__identity_int8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_bool_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_uint16)
// op(A') function:  GB (_unop_tran__identity_bool_uint16)

// C type:   bool
// A type:   uint16_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (uint16_t -> bool: nonzero becomes true)
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise copy-with-cast of anz entries from Ax into Cx.  When A is
// bitmap (Ab != NULL), entries with a zero bitmap flag are skipped; the
// bitmap itself has already been copied into C by the caller.
GrB_Info GB (_unop_apply__identity_bool_uint16)
(
    bool *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body lives in the GB_unop_transpose.c template, which expands using the
// GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above.
GrB_Info GB (_unop_tran__identity_bool_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_bitmap_AxB_saxpy_A_bitmap_B_sparse_template.c
//------------------------------------------------------------------------------
// GB_bitmap_AxB_saxpy_A_bitmap_B_sparse: C<#M>+=A*B, C bitmap, M any format
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// C is bitmap, A is bitmap or full, B is sparse or hypersparse.
// M has any format.

// NOTE(review): this file is a template, #include'd inside an enclosing
// function.  All uppercase GB_* names, plus Wf/Wax/Wcx, Ab/Ax, Bp/Bh/Bi,
// Cb, cnvals, naslice/nbslice/ntasks, etc. are defined by the includer.

{

    //--------------------------------------------------------------------------
    // allocate workspace for each task
    //--------------------------------------------------------------------------

    // imeta = total number of rows of A and H in all panels
    int64_t imeta = naslice * GB_PANEL_SIZE ;

    // number of entries in one panel of G for A.
    #if GB_HAS_BITMAP_MULTADD && !GB_IS_ANY_PAIR_SEMIRING
    // Always load the A panel into G, since Ax [pA] has uninitialized values
    // where Ab [pA] == 0.  The GB_BITMAP_MULTADD update will access these
    // values, and they must be initialized.
    const bool load_apanel = true ;
    #else
    // only load the A panel into G if it consists of more than one panel
    const bool load_apanel = (avlen > GB_PANEL_SIZE) ;
    #endif

    // Each panel of G is GB_PANEL_SIZE-by-avdim, held by column.
    int64_t apanel_size = load_apanel ? (GB_PANEL_SIZE * avdim) : 0 ;
    int64_t afpanel_size = GB_A_IS_BITMAP ? (apanel_size) : 0 ;
    int64_t axpanel_size = A_is_pattern ? 0 : (apanel_size * GB_ASIZE) ;

    // each panel of H is GB_PANEL_SIZE-by-bnvec, held by column; note that
    // H has bnvec vectors, not bvdim.  The C bitmap has bvdim vectors,
    // and bnvec <= bvdim if B is hypersparse.
    int64_t hpanel_size = GB_PANEL_SIZE * bnvec ;

    //--------------------------------------------------------------------------
    // allocate the panels
    //--------------------------------------------------------------------------

    // The G panels are not needed if A would fit into a single panel.
    // In that case A is used in place and not copied into G.

    // Wf holds the Gb bitmaps (wafsize bytes) followed by the Hf bitmaps
    // (wcsize bytes); Wax holds the Gx values; Wcx holds the Hx values.
    int64_t wafsize = naslice * afpanel_size ;
    int64_t waxsize = naslice * axpanel_size ;
    int64_t wcsize = naslice * hpanel_size ;
    int64_t wcxsize = GB_IS_ANY_PAIR_SEMIRING ? 0 : (wcsize * GB_CSIZE) ;
    Wf = GB_MALLOC_WERK (wafsize + wcsize, int8_t, &Wf_size) ;
    Wax = GB_MALLOC_WERK (waxsize, GB_void, &Wax_size) ;
    Wcx = GB_MALLOC_WERK (wcxsize, GB_void, &Wcx_size) ;
    if (Wf == NULL || Wax == NULL || Wcx == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // initialize the panels
    //--------------------------------------------------------------------------

    // for all semirings: set the bitmaps Gb and Hf to zero
    GB_memset (Wf, 0, wafsize + wcsize, nthreads_max) ;

    #if GB_HAS_BITMAP_MULTADD && !GB_IS_ANY_PAIR_SEMIRING
    {
        // Initialize the Hx workspace to identity, if this semiring has a
        // concise bitmap multiply-add expression.  For the any_pair semiring,
        // the numerical values are not needed so Hx is not allocated.
        #if GB_HAS_IDENTITY_BYTE
            // the identity value can be assigned via memset
            GB_memset (Wcx, GB_IDENTITY_BYTE, wcxsize, nthreads_max) ;
        #else
            // an explicit loop is required to set Hx to identity
            // TODO: should each task initialize its own Hf and Hx,
            // and use a static schedule here and for H=G*B?
            GB_CTYPE *restrict Hx = (GB_CTYPE *) Wcx ;
            int64_t pH ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (pH = 0 ; pH < wcsize ; pH++)
            {
                Hx [pH] = GB_IDENTITY ;
            }
        #endif
    }
    #endif

    //--------------------------------------------------------------------------
    // C<#M>=A*B, one metapanel at a time
    //--------------------------------------------------------------------------

    int tid ;
    for (int64_t iouter = 0 ; iouter < avlen ; iouter += imeta)
    {

        //----------------------------------------------------------------------
        // C<#M>(metapanel,:) += A (metapanel,:)*B
        //----------------------------------------------------------------------

        // The rows in this metapanel are iouter:iouter+imeta-1.

        //----------------------------------------------------------------------
        // load the metapanel: G = A (iouter:iouter+imeta-1,:)
        //----------------------------------------------------------------------

        if ((GB_A_IS_BITMAP || !A_is_pattern) && load_apanel)
        {
            // Loading the panel into G keeps its storage order.  A is not
            // transposed when loaded into the G panels.  However, the leading
            // dimension is reduced.  A is avlen-by-avdim with a leading
            // dimension of avlen, which can be large.  G is np-by-avdim, with
            // np <= GB_PANEL_SIZE.  The loading of A into G can be skipped
            // if all of A can be used in-place.

            // Tasks are a 2-D grid: a_tid picks the row panel, b_tid picks
            // the slice of A's columns (avdim) this task copies.
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < ntasks ; tid++)
            {

                //--------------------------------------------------------------
                // get the panel for this task
                //--------------------------------------------------------------

                int a_tid = tid / nbslice ;
                int b_tid = tid % nbslice ;
                int64_t istart = iouter + a_tid * GB_PANEL_SIZE ;
                int64_t iend = iouter + (a_tid+1) * GB_PANEL_SIZE ;
                iend = GB_IMIN (iend, avlen) ;
                int64_t np = iend - istart ;
                if (np <= 0) continue ;
                int64_t kstart, kend ;
                GB_PARTITION (kstart, kend, avdim, b_tid, nbslice) ;
                int8_t *restrict Gb = Wf + (a_tid * afpanel_size) ;
                #if ( !GB_IS_ANY_PAIR_SEMIRING )
                GB_ATYPE *restrict Gx = (GB_ATYPE *)
                    (Wax + (a_tid * axpanel_size)) ;
                #endif

                //--------------------------------------------------------------
                // load A for this panel
                //--------------------------------------------------------------

                // TODO::: if A iso, only load a single entry into Gx, and use Gx as iso

                #if ( GB_A_IS_BITMAP )
                {

                    //----------------------------------------------------------
                    // A is bitmap
                    //----------------------------------------------------------

                    if (!A_is_pattern)
                    {
                        // load Ab and Ax into Gb and Gx
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            for (int64_t ii = 0 ; ii < np ; ii++)
                            {
                                // Gb (ii,k) = Ab (istart+ii,k)
                                const int64_t pG = ii + k*np ;
                                const int64_t pA = istart + ii + k*avlen ;
                                const int8_t gb = Ab [pA] ;
                                Gb [pG] = gb ;
                                if (gb)
                                {
                                    // Gx (ii,k) = Ax (istart+ii,k)
                                    GB_LOADA (Gx, pG, Ax, pA, A_iso) ;
                                }
                                #if GB_HAS_BITMAP_MULTADD \
                                    && ( !GB_IS_ANY_PAIR_SEMIRING )
                                else
                                {
                                    // Gx (ii,k) = 0, so GB_BITMAP_MULTADD
                                    // never reads an uninitialized value
                                    Gx [pG] = GB_ATYPE_CAST (0, 0) ;
                                }
                                #endif
                            }
                        }
                    }
                    else
                    {
                        // just load the Ab bitmap into Gb, not the values
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            for (int64_t ii = 0 ; ii < np ; ii++)
                            {
                                // Gb (ii,k) = Ab (istart+ii,k)
                                const int64_t pG = ii + k*np ;
                                const int64_t pA = istart + ii + k*avlen ;
                                Gb [pG] = Ab [pA] ;
                            }
                        }
                    }
                }
                #else
                {

                    //----------------------------------------------------------
                    // A is full
                    //----------------------------------------------------------

                    #if ( !GB_IS_ANY_PAIR_SEMIRING )
                    if (!A_is_pattern)
                    {
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            for (int64_t ii = 0 ; ii < np ; ii++)
                            {
                                // Gx (ii,k) = Ax (istart+ii,k)
                                const int64_t pG = ii + k*np ;
                                const int64_t pA = istart + ii + k*avlen ;
                                GB_LOADA (Gx, pG, Ax, pA, A_iso) ;
                            }
                        }
                    }
                    #endif
                }
                #endif
            }
        }

        //----------------------------------------------------------------------
        // H = G*B
        //----------------------------------------------------------------------

        // Each task multiplies its row panel of G by its slice of B's
        // vectors, accumulating into its private H panel (Hf bitmap + Hx).
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // get the panel of H and G for this task
            //------------------------------------------------------------------

            int a_tid = tid / nbslice ;
            int b_tid = tid % nbslice ;
            int64_t istart = iouter + a_tid * GB_PANEL_SIZE ;
            int64_t iend = iouter + (a_tid+1) * GB_PANEL_SIZE ;
            iend = GB_IMIN (iend, avlen) ;
            int64_t np = iend - istart ;
            if (np <= 0) continue ;
            const int8_t *restrict Gb ;
            if (load_apanel)
            {
                // A has been loaded into the G panel
                Gb = Wf + (a_tid * afpanel_size) ;
            }
            else
            {
                // use A in-place
                Gb = Ab ;
            }
            int8_t *restrict Hf = Wf + (a_tid * hpanel_size) + wafsize ;
            #if ( !GB_IS_ANY_PAIR_SEMIRING )
            const GB_ATYPE *restrict Gx ;
            if (load_apanel)
            {
                // A has been loaded into the G panel
                Gx = (GB_ATYPE *) (Wax + (a_tid * axpanel_size)) ;
            }
            else
            {
                // use A in-place
                Gx = (GB_ATYPE *) Ax ;
            }
            GB_CTYPE *restrict Hx = (GB_CTYPE *)
                (Wcx + (a_tid * hpanel_size) * GB_CSIZE) ;
            #endif
            GB_XINIT ;      // for plus, bor, band, and bxor monoids only

            //------------------------------------------------------------------
            // H_panel (:,kfirst:klast-1) = G_panel * B (:, kfirst:klast-1)
            //------------------------------------------------------------------

            int64_t kfirst = B_slice [b_tid] ;
            int64_t klast = B_slice [b_tid + 1] ;
            for (int64_t kk = kfirst ; kk < klast ; kk++)
            {

                //--------------------------------------------------------------
                // H_panel (:,kk) = G_panel * B (:,kk)
                //--------------------------------------------------------------

                // H and B are indexed in the compact space kk = 0:bnvec-1,
                // not by the names j = 0:bvdim-1.  When B is sparse, these are
                // the same.  If B is hypersparse, j is Bh [kk].  However, j is
                // needed for the SECONDJ and SECONDJ1 multipliers.

                int64_t j = GBH (Bh, kk) ;
                int64_t pB = Bp [kk] ;
                int64_t pB_end = Bp [kk+1] ;
                int64_t pH = kk * np ;

                #if GB_IS_SECONDJ_MULTIPLIER
                // t = j or j+1 for SECONDJ and SECONDJ1 multipliers
                GB_CIJ_DECLARE (t) ;
                GB_MULT (t, ignore, ignore, ignore, ignore, j) ;
                #endif

                // define the multiplier for this vector of B
                #undef  GB_MULT_G_iik_B_kj
                #if GB_IS_PAIR_MULTIPLIER
                    // t = G(ii,k) * B(k,j) is always equal to 1
                    #define GB_MULT_G_iik_B_kj(ii)
                #elif ( GB_IS_FIRSTJ_MULTIPLIER || GB_IS_SECONDJ_MULTIPLIER )
                    // t is already defined for these multipliers
                    #define GB_MULT_G_iik_B_kj(ii)
                #else
                    // t = G(ii,k) * B(k,j)
                    #define GB_MULT_G_iik_B_kj(ii) \
                        GB_GETA (giik, Gx, pG + ii, false) ; \
                        GB_CIJ_DECLARE (t) ; \
                        GB_MULT (t, giik, bkj, istart + ii, k, j)
                #endif

                for ( ; pB < pB_end ; pB++)
                {
                    int64_t k = Bi [pB] ;       // get B(k,j)
                    int64_t pG = k * np ;       // get G(:,k)
                    GB_GET_B_kj ;               // bkj = B(k,j)
                    GB_XLOAD (bkj) ;            // X [1] = bkj (plus_times only)
                    // H_panel (:,j) = G_panel (:,k) * B(k,j)
                    for (int64_t ii = 0 ; ii < np ; ii++)
                    {
                        #if GB_HAS_BITMAP_MULTADD
                        {
                            // if (Gb (ii,k))
                            //     if (Hf (ii,j) == 0)
                            //        Hx (ii,j) = G (ii,k) * B(k,j) ;
                            //        Hf (ii,j) = 1
                            //     else
                            //        Hx (ii,j) += G (ii,k) * B(k,j) ;
                            #if GB_IS_FIRSTI_MULTIPLIER
                            int64_t i = istart + ii ;
                            #endif
                            #if GB_A_IS_BITMAP
                            GB_BITMAP_MULTADD ( Hf [pH+ii], Hx [pH+ii],
                                Gb [pG+ii], Gx [pG+ii], bkj) ;
                            #else
                            GB_BITMAP_MULTADD ( Hf [pH+ii], Hx [pH+ii],
                                1, Gx [pG+ii], bkj) ;
                            #endif
                        }
                        #else
                        {
                            #if GB_A_IS_BITMAP
                            if (Gb [pG+ii])
                            #endif
                            {
                                // t = G(ii,k) * B(k,j)
                                GB_MULT_G_iik_B_kj (ii) ;
                                if (Hf [pH+ii] == 0)
                                {
                                    // H (ii,j) is a new entry
                                    GB_HX_WRITE (pH+ii, t) ;   // Hx (ii,j)=t
                                    Hf [pH+ii] = 1 ;
                                }
                                else
                                {
                                    // H (ii,j) is already present
                                    GB_HX_UPDATE (pH+ii, t) ;  // Hx (ii,j)+=t
                                }
                            }
                        }
                        #endif
                    }
                }
                #undef GB_MULT_G_iik_B_kj
            }
        }

        //----------------------------------------------------------------------
        // C (metapanel,:) += H
        //----------------------------------------------------------------------

        // Gather each task's H panel into the C bitmap, applying the mask,
        // and clear the panel for the next metapanel iteration.
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:cnvals)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // get the panel of H and G for this task
            //------------------------------------------------------------------

            int a_tid = tid / nbslice ;
            int b_tid = tid % nbslice ;
            int64_t istart = iouter + a_tid * GB_PANEL_SIZE ;
            int64_t iend = iouter + (a_tid+1) * GB_PANEL_SIZE ;
            iend = GB_IMIN (iend, avlen) ;
            int64_t np = iend - istart ;
            if (np <= 0) continue ;
            int64_t task_cnvals = 0 ;
            int64_t kstart, kend ;
            GB_PARTITION (kstart, kend, bnvec, b_tid, nbslice) ;
            int8_t *restrict Hf = Wf + (a_tid * hpanel_size) + wafsize ;
            #if ( !GB_IS_ANY_PAIR_SEMIRING )
            GB_CTYPE *restrict Hx = (GB_CTYPE *)
                (Wcx + (a_tid * hpanel_size) * GB_CSIZE) ;
            #endif

            //------------------------------------------------------------------
            // C<#M>(metapanel,j1:j2-1) += H (:,kstart:kend-1)
            //------------------------------------------------------------------

            // If B is hypersparse, the kk-th vector of H is the jth vector
            // of C, where j = Bh [kk].
            for (int64_t kk = kstart ; kk < kend ; kk++)
            {
                int64_t j = GBH (Bh, kk) ;      // j is the range j1:j2-1
                int64_t pC_start = istart + j * avlen ;     // get C(istart,j)
                int64_t pH_start = kk * np ;                // get H(:,kk)
                for (int64_t ii = 0 ; ii < np ; ii++)
                {
                    int64_t pC = pC_start + ii ;    // get C(i,j)
                    int64_t pH = pH_start + ii ;    // get H(ii,kk)
                    if (!Hf [pH]) continue ;
                    Hf [pH] = 0 ;                   // clear the panel
                    int8_t cb = Cb [pC] ;

                    //----------------------------------------------------------
                    // check M(i,j)
                    //----------------------------------------------------------

                    #undef  GB_IF_MIJ
                    #if GB_MASK_IS_SPARSE_OR_HYPER
                        // M is sparse or hypersparse: bit 2 of Cb holds M(i,j)
                        bool mij = ((cb & 2) != 0) ^ Mask_comp ;
                        cb = (cb & 1) ;
                        #define GB_IF_MIJ if (mij)
                    #elif GB_MASK_IS_BITMAP_OR_FULL
                        // M is bitmap or full
                        GB_GET_M_ij (pC) ;
                        mij = mij ^ Mask_comp ;
                        #define GB_IF_MIJ if (mij)
                    #else
                        // no mask: always update
                        #define GB_IF_MIJ
                    #endif

                    //----------------------------------------------------------
                    // C(i,j) += H(ii,kk)
                    //----------------------------------------------------------

                    GB_IF_MIJ
                    {
                        if (cb == 0)
                        {
                            // C(i,j) = H(ii,kk)
                            GB_CIJ_GATHER (pC, pH) ;
                            Cb [pC] = keep ;
                            task_cnvals++ ;
                        }
                        else
                        {
                            // Currently, the matrix C is a newly allocated
                            // matrix, not the C_in input matrix to GrB_mxm.
                            // As a result, this condition is not used.  It
                            // will be in the future when this method is
                            // modified to modify C in-place.
                            ASSERT (GB_DEAD_CODE) ;
                            // C(i,j) += H(ii,kk)
                            GB_CIJ_GATHER_UPDATE (pC, pH) ;
                        }
                    }

                    //----------------------------------------------------------
                    // clear the panel
                    //----------------------------------------------------------

                    #if GB_HAS_BITMAP_MULTADD && !GB_IS_ANY_PAIR_SEMIRING
                    {
                        // H(ii,kk) = identity
                        Hx [pH] = GB_IDENTITY ;
                    }
                    #endif
                }
            }
            cnvals += task_cnvals ;
        }
    }
}

#undef GB_IF_MIJ
GB_subref_phase0.c
//------------------------------------------------------------------------------ // GB_subref_phase0: find vectors of C = A(I,J) and determine I,J properties //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ #include "GB_subref.h" #define GB_Ai(p) GB_UNFLIP (Ai [p]) //------------------------------------------------------------------------------ // GB_find_Ap_start_end //------------------------------------------------------------------------------ // Find pA and pA_end so that Ai,Ax [pA:pA_end-1] contains the vector // A(imin:imax,kA). If A(:,kA) is dense, [pA:pA_end-1] is the entire dense // vector (it is not trimmed). Otherwise, if A(imin:imax,kA) is empty, then // pA and pA_end are set to -1 to denote an empty list. The resulting pointers // are then returned in Ap_start [kC] and Ap_end [kC]. 
// Binary-search trim of one vector of A to the row range imin:imax, tolerant
// of zombie indices (GB_Ai unflips them).  Writes the resulting entry range
// into Ap_start [kC] and Ap_end [kC]; (-1,-1) denotes an empty intersection.
static inline void GB_find_Ap_start_end
(
    // input, not modified
    const int64_t kA,
    const int64_t *restrict Ap,
    const int64_t *restrict Ai,
    const int64_t avlen,
    const int64_t imin,
    const int64_t imax,
    const int64_t kC,
    const int64_t nzombies,
    // output: Ap_start [kC] and Ap_end [kC]:
    int64_t *restrict Ap_start,
    int64_t *restrict Ap_end
)
{

    //--------------------------------------------------------------------------
    // get A(:,kA)
    //--------------------------------------------------------------------------

    // printf ("kC: "GBd" kA: "GBd"\n", kA, kC) ;
    int64_t pA = Ap [kA] ;
    int64_t pA_end = Ap [kA+1] ;
    int64_t ajnz = pA_end - pA ;
    // printf ("pA "GBd" pA_end "GBd"\n", pA, pA_end) ;

    //--------------------------------------------------------------------------
    // trim it to A(imin:imax,kA)
    //--------------------------------------------------------------------------

    if (ajnz == avlen)
    {

        //----------------------------------------------------------------------
        // A (:,kA) is dense; use pA and pA_end as-is
        //----------------------------------------------------------------------

        // printf ("A dense\n") ;
        ;

    }
    else if (ajnz == 0 || GB_Ai (pA) > imax || GB_Ai (pA_end-1) < imin)
    {

        //----------------------------------------------------------------------
        // intersection of A(:,kA) and imin:imax is empty
        //----------------------------------------------------------------------

        pA = -1 ;
        pA_end = -1 ;
        // printf ("A empty\n") ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A (:,kA) is sparse, with at least one entry
        //----------------------------------------------------------------------

        // trim the leading part of A(:,kA): advance pA to the first entry
        // with row index >= imin
        if (GB_Ai (pA) < imin)
        {
            bool found, is_zombie ;
            int64_t pright = pA_end - 1 ;
            GB_BINARY_SPLIT_ZOMBIE (imin, Ai, pA, pright, found,
                nzombies, is_zombie) ;
        }

        // trim the trailing part of A (:,kA)
        if (imin == imax)
        {
            // single-row lookup: at most one entry can match
            if (GB_Ai (pA) == imin)
            {
                // found the single entry A (i,kA)
                pA_end = pA + 1 ;
            }
            else
            {
                // A (i,kA) has not been found
                pA = -1 ;
                pA_end = -1 ;
            }
        }
        else if (imax < GB_Ai (pA_end-1))
        {
            bool found, is_zombie ;
            int64_t pleft = pA ;
            int64_t pright = pA_end - 1 ;
            GB_BINARY_SPLIT_ZOMBIE (imax, Ai, pleft, pright, found,
                nzombies, is_zombie) ;
            pA_end = (found) ? (pleft + 1) : pleft ;
        }

        #ifdef GB_DEBUG
        ajnz = pA_end - pA ;
        if (ajnz > 0)
        {
            // A(imin:imax,kA) is now in Ai [pA:pA_end-1]
            ASSERT (GB_IMPLIES (Ap [kA] < pA, GB_Ai (pA-1) < imin)) ;
            ASSERT (GB_IMPLIES (pA_end < Ap [kA+1], imax < GB_Ai (pA_end))) ;
            ASSERT (imin <= GB_Ai (pA)) ;
            ASSERT (GB_Ai (pA_end-1) <= imax) ;
        }
        #endif
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // The result [pA:pA_end-1] defines the range of entries that need to be
    // accessed for constructing C(:,kC).

    Ap_start [kC] = pA ;
    Ap_end [kC] = pA_end ;
}

//------------------------------------------------------------------------------
// GB_subref_phase0
//------------------------------------------------------------------------------

GrB_Info GB_subref_phase0
(
    // output
    int64_t *restrict *p_Ch,        // Ch = C->h hyperlist, or NULL standard
    int64_t *restrict *p_Ap_start,  // A(:,kA) starts at Ap_start [kC]
    int64_t *restrict *p_Ap_end,    // ... and ends at Ap_end [kC] - 1
    int64_t *p_Cnvec,               // # of vectors in C
    bool *p_need_qsort,             // true if C must be sorted
    int *p_Ikind,                   // kind of I
    int64_t *p_nI,                  // length of I
    int64_t Icolon [3],             // for GB_RANGE, GB_STRIDE
    int64_t *p_nJ,                  // length of J
    // input, not modified
    const GrB_Matrix A,
    const GrB_Index *I,             // index list for C = A(I,J), or GrB_ALL, etc.
    const int64_t ni,               // length of I, or special
    const GrB_Index *J,             // index list for C = A(I,J), or GrB_ALL, etc.
const int64_t nj, // length of J, or special const bool must_sort, // true if C must be returned sorted GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (p_Ch != NULL) ; ASSERT (p_Ap_start != NULL) ; ASSERT (p_Ap_end != NULL) ; ASSERT (p_Cnvec != NULL) ; ASSERT (p_nJ != NULL) ; ASSERT (p_Ikind != NULL) ; ASSERT (p_nI != NULL) ; ASSERT (Icolon != NULL) ; ASSERT_OK (GB_check (A, "A for subref phase 0", GB0)) ; ASSERT (I != NULL) ; ASSERT (J != NULL) ; GrB_Info info ; //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- int64_t *restrict Ap = A->p ; // Ap (but not A->p) may be trimmed int64_t *restrict Ah = A->h ; // Ah (but not A->h) may be trimmed int64_t *restrict Ai = A->i ; int64_t anvec = A->nvec ; // may be trimmed int64_t avlen = A->vlen ; int64_t avdim = A->vdim ; int64_t nzombies = A->nzombies ; //-------------------------------------------------------------------------- // check the properties of I and J //-------------------------------------------------------------------------- // C = A(I,J) so I is in range 0:avlen-1 and J is in range 0:avdim-1 int64_t nI, nJ, Jcolon [3] ; int Ikind, Jkind ; GB_ijlength (I, ni, avlen, &nI, &Ikind, Icolon) ; GB_ijlength (J, nj, avdim, &nJ, &Jkind, Jcolon) ; bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ; int64_t imin, imax, jmin, jmax ; // printf ("\n================================================= I:\n") ; info = GB_ijproperties (I, ni, nI, avlen, &Ikind, Icolon, &I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context) ; if (info != GrB_SUCCESS) { // I invalid return (info) ; } // printf ("\n================================================= J:\n") ; info = GB_ijproperties (J, nj, nJ, avdim, &Jkind, Jcolon, &J_unsorted, &J_has_dupl, 
&J_contig, &jmin, &jmax, Context) ; if (info != GrB_SUCCESS) { // J invalid return (info) ; } bool need_qsort = I_unsorted ; // For the symbolic case, GB_subref must always return C sorted. For the // numeric case, GB_subref may return C with jumbled indices in each // vector, if C will be transposed later by GB_accum_mask. if (must_sort == false) { // The caller does not need C to be returned with sorted vectors. need_qsort = false ; } //-------------------------------------------------------------------------- // determine if C is empty //-------------------------------------------------------------------------- bool C_empty = (nI == 0 || nJ == 0) ; //-------------------------------------------------------------------------- // trim the hyperlist of A //-------------------------------------------------------------------------- // Ah, Ap, and anvec are modified to include just the vectors in range // jmin:jmax, inclusive. A itself is not modified, just the Ah and Ap // pointers, and the scalar anvec. If J is ":", then jmin is zero and // jmax is avdim-1, so there is nothing to trim from Ah. If C is empty, // then Ah and Ap will not be accessed at all, so this can be skipped. bool A_is_hyper = A->is_hyper ; if (A_is_hyper && !C_empty) { //---------------------------------------------------------------------- // trim the leading end of Ah so that it starts with jmin:... 
//---------------------------------------------------------------------- if (jmin > 0) { bool found ; int64_t kleft = 0 ; int64_t kright = anvec-1 ; GB_BINARY_SPLIT_SEARCH (jmin, Ah, kleft, kright, found) ; Ah += kleft ; Ap += kleft ; anvec -= kleft ; } //---------------------------------------------------------------------- // trim the trailing end of Ah so that it ends with ..:jmax //---------------------------------------------------------------------- if (jmax < avdim-1) { bool found ; int64_t kleft = 0 ; int64_t kright = anvec-1 ; GB_BINARY_SPLIT_SEARCH (jmax, Ah, kleft, kright, found) ; anvec = (found) ? (kleft + 1) : kleft ; } // Ah has been trimmed ASSERT (GB_IMPLIES (anvec > 0, jmin <= Ah [0] && Ah [anvec-1] <= jmax)); } // Ah may now be empty, after being trimmed C_empty = C_empty || (anvec == 0) ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = 1, ntasks = 1 ; int max_ntasks = nthreads_max * 8 ; int64_t Count [max_ntasks+1] ; #define GB_GET_NTHREADS_AND_NTASKS(work) \ { \ nthreads = GB_nthreads (work, chunk, nthreads_max) ; \ ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ; \ ntasks = GB_IMIN (ntasks, work) ; \ ntasks = GB_IMAX (ntasks, 1) ; \ } //-------------------------------------------------------------------------- // compute Cnvec and determine the format of Ch //-------------------------------------------------------------------------- // Ch is an explicit or implicit array of size Cnvec <= nJ. jC = Ch [kC] // if C(:,jC) is the (kC)th vector of C. If NULL, then C is standard, and // jC == kC. jC is in the range 0 to nJ-1. 
int64_t *restrict Ch = NULL ; int64_t *restrict Ap_start = NULL ; int64_t *restrict Ap_end = NULL ; int64_t Cnvec = 0 ; int64_t jbegin = Jcolon [GxB_BEGIN] ; int64_t jinc = Jcolon [GxB_INC ] ; if (C_empty) { //---------------------------------------------------------------------- // C is an empty hypersparse matrix //---------------------------------------------------------------------- ; } else if (!A_is_hyper) { //---------------------------------------------------------------------- // both C and A are standard matrices //---------------------------------------------------------------------- Cnvec = nJ ; GB_GET_NTHREADS_AND_NTASKS (nJ) ; } else if (Jkind == GB_ALL || Jkind == GB_RANGE) { //---------------------------------------------------------------------- // J is ":" or jbegin:jend //---------------------------------------------------------------------- // Ch is a shifted copy of the trimmed Ah, of length Cnvec = anvec. // so kA = kC, and jC = Ch [kC] = jA - jmin. Ap has also been trimmed. Cnvec = anvec ; ASSERT (Cnvec <= nJ) ; GB_GET_NTHREADS_AND_NTASKS (anvec) ; } else if (Jkind == GB_STRIDE && anvec < nJ * 64) { //---------------------------------------------------------------------- // J is jbegin:jinc:jend, but J is large //---------------------------------------------------------------------- // The case for Jkind == GB_STRIDE can be done by either this method, // or the one below. This takes O(anvec) time, and the one below // takes O(nj*log2(anvec)), so use this method if anvec < nj * 64. // Ch is a list of length Cnvec, where Cnvec is the length of // the intersection of Ah and jbegin:jinc:jend. // count the length of Ch Cnvec = 0 ; GB_GET_NTHREADS_AND_NTASKS (anvec) ; // scan all of Ah and check each entry if it appears in J #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { int64_t kA_start, kA_end, my_Cnvec = 0 ; GB_PARTITION (kA_start, kA_end, anvec, (jinc > 0) ? 
tid : (ntasks-tid-1), ntasks) ; for (int64_t kA = kA_start ; kA < kA_end ; kA++) { int64_t jA = Ah [kA] ; if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon)) { my_Cnvec++ ; } } Count [tid] = my_Cnvec ; } GB_cumsum (Count, ntasks, NULL, 1) ; Cnvec = Count [ntasks] ; } else // Jkind == GB_LIST or GB_STRIDE { //---------------------------------------------------------------------- // J is an explicit list, or jbegin:jinc:end //---------------------------------------------------------------------- // Ch is an explicit list: the intersection of Ah and J // count the length of Ch Cnvec = 0 ; GB_GET_NTHREADS_AND_NTASKS (nJ) ; // scan all of J and check each entry if it appears in Ah #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { int64_t jC_start, jC_end, my_Cnvec = 0 ; GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ; for (int64_t jC = jC_start ; jC < jC_end ; jC++) { int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; bool found ; int64_t kA = 0 ; int64_t kright = anvec-1 ; GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ; if (found) my_Cnvec++ ; } Count [tid] = my_Cnvec ; } GB_cumsum (Count, ntasks, NULL, 1) ; Cnvec = Count [ntasks] ; } //-------------------------------------------------------------------------- // allocate Ch, Ap_start, and Ap_end //-------------------------------------------------------------------------- C_empty = C_empty || (Cnvec == 0) ; // C is hypersparse if A is hypersparse, or if C is empty bool C_is_hyper = A_is_hyper || C_empty ; // printf ("C_is_hyper %d C_empty %d\n", C_is_hyper, C_empty) ; if (C_is_hyper) { GB_MALLOC_MEMORY (Ch, Cnvec, sizeof (int64_t)) ; if (Ch == NULL) { return (GB_OUT_OF_MEMORY) ; } } if (Cnvec > 0) { GB_MALLOC_MEMORY (Ap_start, Cnvec, sizeof (int64_t)) ; GB_MALLOC_MEMORY (Ap_end, Cnvec, sizeof (int64_t)) ; if (Ap_start == NULL || Ap_end == NULL) { // out of memory GB_FREE_MEMORY (Ch, Cnvec, sizeof (int64_t)) ; GB_FREE_MEMORY (Ap_start, Cnvec, sizeof (int64_t)) ; 
GB_FREE_MEMORY (Ap_end, Cnvec, sizeof (int64_t)) ; return (GB_OUT_OF_MEMORY) ; } } //-------------------------------------------------------------------------- // create Ch, Ap_start, and Ap_end //-------------------------------------------------------------------------- // For the (kC)th vector of C, which corresponds to the (kA)th vector of A, // pA = Ap_start [kC] and pA_end = Ap_end [kC] are pointers to the range // of entries in A(imin:imax,kA). if (C_empty) { //---------------------------------------------------------------------- // C is an empty hypersparse matrix //---------------------------------------------------------------------- ; } else if (!A_is_hyper) { //---------------------------------------------------------------------- // both C and A are standard matrices //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t jC = 0 ; jC < nJ ; jC++) { int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; GB_find_Ap_start_end (jA, Ap, Ai, avlen, imin, imax, jC, nzombies, Ap_start, Ap_end) ; } } else if (Jkind == GB_ALL || Jkind == GB_RANGE) { //---------------------------------------------------------------------- // J is ":" or jbegin:jend //---------------------------------------------------------------------- // C and A are both hypersparse. Ch is a shifted copy of the trimmed // Ah, of length Cnvec = anvec. so kA = kC. Ap has also been trimmed. 
#pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t kC = 0 ; kC < Cnvec ; kC++) { int64_t kA = kC ; int64_t jA = Ah [kA] ; int64_t jC = jA - jmin ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; } } else if (Jkind == GB_STRIDE && anvec < nJ * 64) { //---------------------------------------------------------------------- // J is jbegin:jinc:jend where jinc may be positive or negative //---------------------------------------------------------------------- // C and A are both hypersparse. Ch is constructed by scanning all // vectors in Ah [0..anvec-1] and checking if they appear in the // jbegin:jinc:jend sequence. if (jinc > 0) { #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { int64_t kA_start, kA_end ; GB_PARTITION (kA_start, kA_end, anvec, tid, ntasks) ; int64_t kC = Count [tid] ; for (int64_t kA = kA_start ; kA < kA_end ; kA++) { int64_t jA = Ah [kA] ; if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon)) { int64_t jC = (jA - jbegin) / jinc ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; kC++ ; } } } } else { #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { int64_t kA_start, kA_end ; GB_PARTITION (kA_start, kA_end, anvec, ntasks-tid-1, ntasks) ; int64_t kC = Count [tid] ; for (int64_t kA = kA_end-1 ; kA >= kA_start ; kA--) { int64_t jA = Ah [kA] ; if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon)) { int64_t jC = (jA - jbegin) / jinc ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; kC++ ; } } } } } else // Jkind == GB_LIST or GB_STRIDE { //---------------------------------------------------------------------- // J is an explicit list, or jbegin:jinc:jend //---------------------------------------------------------------------- // C and A are both 
hypersparse. Ch is constructed by scanning the // list J, or the entire jbegin:jinc:jend sequence. Each vector is // then found in Ah, via binary search. #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (int tid = 0 ; tid < ntasks ; tid++) { int64_t jC_start, jC_end ; GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ; int64_t kC = Count [tid] ; for (int64_t jC = jC_start ; jC < jC_end ; jC++) { int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; bool found ; int64_t kA = 0 ; int64_t kright = anvec-1 ; GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ; if (found) { ASSERT (jA == Ah [kA]) ; Ch [kC] = jC ; GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax, kC, nzombies, Ap_start, Ap_end) ; kC++ ; } } } } //-------------------------------------------------------------------------- // check result //-------------------------------------------------------------------------- #ifdef GB_DEBUG for (int64_t kC = 0 ; kC < Cnvec ; kC++) { // jC is the (kC)th vector of C = A(I,J) int64_t jC = (Ch == NULL) ? kC : Ch [kC] ; int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ; // jA is the corresponding (kA)th vector of A. int64_t kA = 0 ; int64_t pright = A->nvec - 1 ; int64_t pA_start_all, pA_end_all ; bool found = GB_lookup (A->is_hyper, A->h, A->p, &kA, pright, jA, &pA_start_all, &pA_end_all) ; if (found && A->is_hyper) { ASSERT (jA == A->h [kA]) ; } // printf ("kC "GBd" jC "GBd" kA "GBd" jA "GBd" found %d\n", // kC, jC, kA, jA, found) ; int64_t pA = Ap_start [kC] ; int64_t pA_end = Ap_end [kC] ; int64_t ajnz = pA_end - pA ; if (ajnz == avlen) { // A(:,kA) is dense; Ai [pA:pA_end-1] is the entire vector. // C(:,kC) will have exactly nI entries. 
ASSERT (pA == pA_start_all) ; ASSERT (pA_end == pA_end_all ) ; ; } else if (ajnz > 0) { // A(imin:imax,kA) has at least one entry, in Ai [pA:pA_end-1] // printf ("pA_start_all "GBd" pA "GBd" pA_end "GBd" pA_end_all " // GBd"\n", pA_start_all, pA, pA_end, pA_end_all) ; ASSERT (imin <= GB_Ai (pA)) ; ASSERT (GB_Ai (pA_end-1) <= imax) ; ASSERT (pA_start_all <= pA && pA < pA_end && pA_end <= pA_end_all) ; } else { // A(imin:imax,kA) and C(:,kC) are empty ; } } #endif //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- (*p_Ch ) = Ch ; (*p_Ap_start ) = Ap_start ; (*p_Ap_end ) = Ap_end ; (*p_Cnvec ) = Cnvec ; (*p_need_qsort) = need_qsort ; (*p_Ikind ) = Ikind ; (*p_nI ) = nI ; (*p_nJ ) = nJ ; return (GrB_SUCCESS) ; }
sse.h
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> * 2015-2017 John W. 
Ratcliff <jratcliffscarab@gmail.com> * 2015 Brandon Rowlett <browlett@nvidia.com> * 2015 Ken Fast <kfast@gdeb.com> */ #if !defined(SIMDE_X86_SSE_H) #define SIMDE_X86_SSE_H #include "mmx.h" #if defined(_WIN32) #include <windows.h> #endif HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #endif SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else SIMDE_ALIGN_TO_16 int8_t i8[16]; SIMDE_ALIGN_TO_16 int16_t i16[8]; SIMDE_ALIGN_TO_16 int32_t i32[4]; SIMDE_ALIGN_TO_16 int64_t i64[2]; SIMDE_ALIGN_TO_16 uint8_t u8[16]; SIMDE_ALIGN_TO_16 uint16_t u16[8]; SIMDE_ALIGN_TO_16 uint32_t u32[4]; SIMDE_ALIGN_TO_16 uint64_t u64[2]; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN_TO_16 simde_int128 i128[1]; SIMDE_ALIGN_TO_16 simde_uint128 u128[1]; #endif SIMDE_ALIGN_TO_16 simde_float32 f32[4]; SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)]; SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; #endif SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2]; SIMDE_ALIGN_TO_16 simde__m64 m64[2]; #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_ALIGN_TO_16 __m128 n; 
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* ARM NEON views of the same 16 bytes, one member per element type. */
    SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
    SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
    SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
    SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
    SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
    SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
    SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
    SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
    SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      /* float64x2_t only exists on AArch64 (ARMv8). */
      SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
    #endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
    /* WebAssembly SIMD128 uses a single untyped 128-bit vector type. */
    SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    /* POWER AltiVec/VSX views; 64-bit element types require POWER7+. */
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
    SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
    #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
      SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
      SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
      SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
    #endif
#endif
} simde__m128_private;

/* Public simde__m128 type: pick the best representation available, in
 * order of preference -- the native SSE type, a native vector type on
 * other architectures, a GNU-style vector extension, or (as a last
 * resort) the private union itself. */
#if defined(SIMDE_X86_SSE_NATIVE)
  typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
  typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
  typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
  typedef simde__m128_private simde__m128;
#endif

/* When native aliases are requested on a non-SSE target, expose the
 * type under its Intel name as well. */
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
  typedef simde__m128 __m128;
#endif

/* Both representations must be exactly 128 bits wide. */
HEDLEY_STATIC_ASSERT(16 ==
sizeof(simde__m128), "simde__m128 size incorrect"); HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect"); #if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF) HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned"); HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned"); #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_private(simde__m128_private v) { simde__m128 r; simde_memcpy(&r, &v, sizeof(r)); return r; } SIMDE_FUNCTION_ATTRIBUTES simde__m128_private simde__m128_to_private(simde__m128 v) { simde__m128_private r; simde_memcpy(&r, &v, sizeof(r)); return r; } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32) #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64) #endif #endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */ #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8) 
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32) #if defined(SIMDE_BUG_GCC_95782) SIMDE_FUNCTION_ATTRIBUTES SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128_to_altivec_f32(simde__m128 value) { simde__m128_private r_ = simde__m128_to_private(value); return r_.altivec_f32; } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) { simde__m128_private r_; r_.altivec_f32 = value; return simde__m128_from_private(r_); } #else SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32) #endif #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64) #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128); #endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */ enum { #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST, SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN, SIMDE_MM_ROUND_UP = _MM_ROUND_UP, SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO #else SIMDE_MM_ROUND_NEAREST = 0x0000, SIMDE_MM_ROUND_DOWN = 0x2000, SIMDE_MM_ROUND_UP = 0x4000, SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000 #endif }; #if defined(_MM_FROUND_TO_NEAREST_INT) # define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT # define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF # define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF # define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO # define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION # define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC # define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC #else # define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00 # 
define SIMDE_MM_FROUND_TO_NEG_INF 0x01 # define SIMDE_MM_FROUND_TO_POS_INF 0x02 # define SIMDE_MM_FROUND_TO_ZERO 0x03 # define SIMDE_MM_FROUND_CUR_DIRECTION 0x04 # define SIMDE_MM_FROUND_RAISE_EXC 0x00 # define SIMDE_MM_FROUND_NO_EXC 0x08 #endif #define SIMDE_MM_FROUND_NINT \ (SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_FLOOR \ (SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_CEIL \ (SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_TRUNC \ (SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_RINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_NEARBYINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC) #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT) # define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT # define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF # define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF # define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO # define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION # define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC # define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT # define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR # define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL # define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC # define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT # define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT #endif SIMDE_FUNCTION_ATTRIBUTES unsigned int SIMDE_MM_GET_ROUNDING_MODE(void) { #if defined(SIMDE_X86_SSE_NATIVE) return _MM_GET_ROUNDING_MODE(); #elif defined(SIMDE_HAVE_FENV_H) unsigned int vfe_mode; switch (fegetround()) { #if defined(FE_TONEAREST) case FE_TONEAREST: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; #endif #if defined(FE_TOWARDZERO) case FE_TOWARDZERO: vfe_mode = SIMDE_MM_ROUND_DOWN; break; #endif #if defined(FE_UPWARD) case FE_UPWARD: vfe_mode = 
SIMDE_MM_ROUND_UP; break; #endif #if defined(FE_DOWNWARD) case FE_DOWNWARD: vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO; break; #endif default: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; } return vfe_mode; #else return SIMDE_MM_ROUND_NEAREST; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE() #endif SIMDE_FUNCTION_ATTRIBUTES void SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) { #if defined(SIMDE_X86_SSE_NATIVE) _MM_SET_ROUNDING_MODE(a); #elif defined(SIMDE_HAVE_FENV_H) int fe_mode = FE_TONEAREST; switch (a) { #if defined(FE_TONEAREST) case SIMDE_MM_ROUND_NEAREST: fe_mode = FE_TONEAREST; break; #endif #if defined(FE_TOWARDZERO) case SIMDE_MM_ROUND_TOWARD_ZERO: fe_mode = FE_TOWARDZERO; break; #endif #if defined(FE_DOWNWARD) case SIMDE_MM_ROUND_DOWN: fe_mode = FE_DOWNWARD; break; #endif #if defined(FE_UPWARD) case SIMDE_MM_ROUND_UP: fe_mode = FE_UPWARD; break; #endif default: return; } fesetround(fe_mode); #else (void) a; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a) #endif SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_mm_getcsr (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_getcsr(); #else return SIMDE_MM_GET_ROUNDING_MODE(); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_getcsr() simde_mm_getcsr() #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_setcsr (uint32_t a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_setcsr(a); #else SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a)); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_setcsr(a) simde_mm_setcsr(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_round_ps (simde__m128 a, int rounding) SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) { simde__m128_private r_, a_ = simde__m128_to_private(a); /* For architectures which lack a current direction SIMD instruction. 
* * Note that NEON actually has a current rounding mode instruction, * but in ARMv8+ the rounding mode is ignored and nearest is always * used, so we treat ARMv7 as having a rounding mode but ARMv8 as * not. */ #if \ defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \ defined(SIMDE_ARM_NEON_A32V8) if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION) rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13; #endif switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) { case SIMDE_MM_FROUND_CUR_DIRECTION: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndiq_f32(a_.neon_f32); #elif defined(simde_math_nearbyintf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_nearbyintf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEAREST_INT: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndaq_f32(a_.neon_f32); #elif defined(simde_math_roundf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_roundf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEG_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndmq_f32(a_.neon_f32); #elif defined(simde_math_floorf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_floorf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case 
SIMDE_MM_FROUND_TO_POS_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndpq_f32(a_.neon_f32); #elif defined(simde_math_ceilf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_ceilf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_ZERO: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndq_f32(a_.neon_f32); #elif defined(simde_math_truncf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_truncf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; default: HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); } return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) #define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps(e3, e2, e1, e0); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 }; r_.neon_f32 = vld1q_f32(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3); #else r_.f32[0] = e0; r_.f32[1] = e1; r_.f32[2] = e2; r_.f32[3] = e3; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps(e3, e2, e1, e0) 
simde_mm_set_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps1 (simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps1(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) (void) a; return vec_splats(a); #else return simde_mm_set_ps(a, a, a, a); #endif } #define simde_mm_set1_ps(a) simde_mm_set_ps1(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps1(a) simde_mm_set_ps1(a) # define _mm_set1_ps(a) simde_mm_set1_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_move_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_move_ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = { 16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3); #else r_.f32[0] = b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_move_ss(a, b) simde_mm_move_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32); 
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
      /* Remaining fallback branches of simde_mm_add_ps: lanewise
       * single-precision addition on each supported backend. */
      r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* GNU vector extensions: element-wise + on the whole vector. */
      r_.f32 = a_.f32 + b_.f32;
    #else
      /* Portable scalar fallback. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = a_.f32[i] + b_.f32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif

/* _mm_add_ss: add only lane 0 of a and b; lanes 1..3 of the result are
 * copied unchanged from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_add_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    /* Compose from full add + move_ss: take lane 0 of the sum, keep
     * the upper lanes of a. */
    return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* Build {b0, 0, 0, 0} so a full vector add perturbs only lane 0. */
      float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
      float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
      // the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value); #else r_.f32[0] = a_.f32[0] + b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_add_ss(a, b) simde_mm_add_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_and_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_and_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 & b_.i32; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] & b_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_and_ps(a, b) simde_mm_and_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_andnot_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_andnot_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32 & b_.i32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = ~(a_.i32[i]) & b_.i32[i]; } #endif return simde__m128_from_private(r_); 
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif

/* _mm_xor_ps: bitwise XOR of the two 128-bit values. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_xor_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32f = a_.i32f ^ b_.i32f;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
      r_.u32[i] = a_.u32[i] ^ b_.u32[i];
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif

/* _mm_or_ps: bitwise OR of the two 128-bit values. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_or_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32f = a_.i32f | b_.i32f;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
      r_.u32[i] = a_.u32[i] | b_.u32[i];
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif

/* simde_x_mm_not_ps: bitwise NOT of all 128 bits.  SIMDe extension (x_
 * prefix) -- there is no corresponding SSE intrinsic. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
  /* Note: we use ints instead of floats because we don't want cmpeq
   * to return false for (NaN, NaN) */
  __m128i ai = _mm_castps_si128(a);
  return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_i32 = vmvnq_s32(a_.neon_i32);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32 = ~a_.i32;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
      r_.i32[i] = ~(a_.i32[i]);
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
  /* This function is for when you want to blend two elements together
   * according to a mask. It is similar to _mm_blendv_ps, except that
   * it is undefined whether the blend is based on the highest bit in
   * each lane (like blendv) or just bitwise operations. This allows
   * us to implement the function efficiently everywhere.
   *
   * Basically, you promise that all the lanes in mask are either 0 or
   * ~0.
*/ #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_blendv_ps(a, b, mask); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b), mask_ = simde__m128_to_private(mask); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_avg_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) uint32_t wa SIMDE_VECTOR(16); uint32_t wb SIMDE_VECTOR(16); uint32_t wr SIMDE_VECTOR(16); SIMDE_CONVERT_VECTOR_(wa, a_.u16); SIMDE_CONVERT_VECTOR_(wb, b_.u16); wr = (wa + wb + 1) >> 1; SIMDE_CONVERT_VECTOR_(r_.u16, wr); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b) # define _m_pavgw(a, b) simde_mm_avg_pu16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES 
simde__m64 simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_avg_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) uint16_t wa SIMDE_VECTOR(16); uint16_t wb SIMDE_VECTOR(16); uint16_t wr SIMDE_VECTOR(16); SIMDE_CONVERT_VECTOR_(wa, a_.u8); SIMDE_CONVERT_VECTOR_(wb, b_.u8); wr = (wa + wb + 1) >> 1; SIMDE_CONVERT_VECTOR_(r_.u8, wr); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b) # define _m_pavgb(a, b) simde_mm_avg_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_abs_ps(simde__m128 a) { #if defined(SIMDE_X86_AVX512F_NATIVE) && \ (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0)) return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vabsq_f32(a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_abs(a_.altivec_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_fabsf(a_.f32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ps(a, b); #else simde__m128_private r_, a_ = 
simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif

/* _mm_cmpeq_ss: compare only lane 0; upper lanes are copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
  SIMDE_VECTORIZE
  for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
    r_.u32[i] = a_.u32[i]; /* upper lanes pass through from a */
  }

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif

/* _mm_cmpge_ps: lane-wise a >= b, all-ones/all-zeros per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmpge_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif

/* _mm_cmpge_ss: a >= b on lane 0 only; upper lanes copied from a.
 * (Native path disabled for PGI compilers.) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
  return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ?
~UINT32_C(0) : UINT32_C(0);
  SIMDE_VECTORIZE
  for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
    r_.u32[i] = a_.u32[i]; /* upper lanes pass through from a */
  }

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif

/* _mm_cmpgt_ps: lane-wise a > b, all-ones/all-zeros per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmpgt_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif

/* _mm_cmpgt_ss: a > b on lane 0 only; upper lanes copied from a.
 * (Native path disabled for PGI compilers.) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
  return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
  SIMDE_VECTORIZE
  for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
    r_.u32[i] = a_.u32[i]; /* upper lanes pass through from a */
  }

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif

/* _mm_cmple_ps: lane-wise a <= b, all-ones/all-zeros per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmple_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif

/* _mm_cmple_ss: a <= b on lane 0 only; upper lanes copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ?
~UINT32_C(0) : UINT32_C(0);
  SIMDE_VECTORIZE
  for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
    r_.u32[i] = a_.u32[i]; /* upper lanes pass through from a */
  }

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif

/* _mm_cmplt_ps: lane-wise a < b, all-ones/all-zeros per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmplt_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif

/* _mm_cmplt_ss: a < b on lane 0 only; upper lanes copied from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
  SIMDE_VECTORIZE
  for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
    r_.u32[i] = a_.u32[i]; /* upper lanes pass through from a */
  }

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif

/* _mm_cmpneq_ps: lane-wise a != b, all-ones/all-zeros per lane. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmpneq_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* NEON has no "not equal"; invert the equality result. */
    r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
    /* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
       is missing from XL C/C++ v16.1.1,
       though the documentation (table 89 on page 432 of the IBM XL C/C++ for
       Linux Compiler Reference, Version 16.1.1) shows that it should be
       present.  Both GCC and clang support it. */
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    /* No vec_cmpne here: compute cmpeq then bitwise invert via vec_nor. */
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = (a_.f32[i] != b_.f32[i]) ?
~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # 
define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ps(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) /* Note: NEON does not have ordered compare builtin Need to compare a eq a and b eq b to check for NaN Do AND of results to get final */ uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vandq_u32(ceqaa, ceqbb); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32))); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || 
simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
    }
  #else
    HEDLEY_UNREACHABLE();
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif

/* _mm_cmpunord_ps: lane is all-ones when either a or b is NaN (unordered). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
  /* x != x is true only for NaN. */
  return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
    r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
  #elif defined(simde_math_isnanf)
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
    }
  #else
    HEDLEY_UNREACHABLE();
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif

/* _mm_cmpunord_ss: unordered test on lane 0 only; upper lanes copied from a.
 * (Native path disabled for PGI compilers.) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
  return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(simde_math_isnanf)
    r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
    SIMDE_VECTORIZE
    for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
      r_.u32[i] = a_.u32[i]; /* upper lanes pass through from a */
    }
  #else
    HEDLEY_UNREACHABLE();
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif

/* _mm_comieq_ss: scalar compare of lane 0, returning int.  The NEON path
 * ORs in the "either is NaN" mask, matching comieq's unordered result. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_comieq_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
    return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
  #else
    return a_.f32[0] == b_.f32[0];
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif

/* _mm_comige_ss: scalar a >= b on lane 0, returning int. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_comige_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ =
simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0); #else return a_.f32[0] >= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comigt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comigt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0); #else return a_.f32[0] > b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comile_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comile_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0); #else return a_.f32[0] <= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define 
_mm_comile_ss(a, b) simde_mm_comile_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comilt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comilt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0); #else return a_.f32[0] < b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comineq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comineq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0); #else return a_.f32[0] != b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) { simde__m128_private r_, dest_ = simde__m128_to_private(dest), src_ = simde__m128_to_private(src); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0))); r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32); #elif 
defined(SIMDE_WASM_SIMD128_NATIVE) const v128_t sign_pos = wasm_f32x4_splat(-0.0f); r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) #if !defined(HEDLEY_IBM_VERSION) r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32); #else r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32); #endif #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f)); r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos); #elif defined(SIMDE_IEEE754_STORAGE) (void) src_; (void) dest_; simde__m128 sign_pos = simde_mm_set1_ps(-0.0f); r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]); } #endif return simde__m128_from_private(r_); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) { return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_pi2ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0 SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else a_ = simde__m128_to_private(a); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_si2ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_si2ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); r_.i32[1] = a_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_ss2si(a); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0); #else simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); return 
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */ r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0)))); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { simde_float32 v = a_.i16[i]; r_.f32[i] = v; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32x2_ps(a, b); #else simde__m128_private r_; simde__m64_private 
a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32); SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32); #else r_.f32[0] = (simde_float32) a_.i32[0]; r_.f32[1] = (simde_float32) a_.i32[1]; r_.f32[2] = (simde_float32) b_.i32[0]; r_.f32[3] = (simde_float32) b_.i32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi8_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8)))); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]); r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]); r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]); r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi16 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi16(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i])); } #endif return simde__m64_from_private(r_); 
#endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi32(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi8 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi8(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471) /* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to * i16, combine with an all-zero vector of i16 (which will become the upper * half), narrow to i8. 
*/ float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)); float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)); float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min)); r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)) r_.i8[i] = INT8_MAX; else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)) r_.i8[i] = INT8_MIN; else r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i])); } /* Note: the upper half is undefined */ #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (simde_float32) a_.u16[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu8_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8)))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / 
sizeof(r_.f32[0])) ; i++) { r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtsi32_ss(a, b); #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtsi64_ss(a, b); #else return _mm_cvtsi64x_ss(a, b); #endif #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde_float32 simde_mm_cvtss_f32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtss_f32(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vgetq_lane_f32(a_.neon_f32, 0); #else return a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtss_si32 (simde__m128 a) { 
return simde_mm_cvt_ss2si(a); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvtss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtss_si64(a); #else return _mm_cvtss_si64x(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0))); #else return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]); } #endif return simde__m64_from_private(r_); #endif } #define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a)) # define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtt_ss2si(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif #endif } #define simde_mm_cvttss_si32(a) 
simde_mm_cvtt_ss2si((a)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a)) # define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvttss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER) #if defined(__PGI) return _mm_cvttss_si64x(a); #else return _mm_cvttss_si64(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(simde_math_isnanf) r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? 
UINT32_C(0) : ~UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip0 = vrecpeq_f32(b_.neon_f32); float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32)); r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 / b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] / b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ps(a, b) simde_mm_div_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_div_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = a_.f32[0] / b_.f32[0]; 
SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ss(a, b) simde_mm_div_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int16_t simde_mm_extract_pi16 (simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private a_ = simde__m64_to_private(a); return a_.i16[imm8]; } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) # if HEDLEY_HAS_WARNING("-Wvector-conversion") /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ # define simde_mm_extract_pi16(a, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8)) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8) #endif #define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8)) # define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private r_, a_ = simde__m64_to_private(a); r_.i64[0] = a_.i64[0]; r_.i16[imm8] = i; return simde__m64_from_private(r_); } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # if HEDLEY_HAS_WARNING("-Wvector-conversion") /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ # define ssimde_mm_insert_pi16(a, i, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ 
(_mm_insert_pi16((a), (i), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8))) #endif #define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) # define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_ld(0, mem_addr); #else simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load1_ps (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps1(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_dup_f32(mem_addr); #else r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr)); #endif return simde__m128_from_private(r_); #endif } #define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr) # define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ss (simde_float32 const* mem_addr) { #if 
defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ss(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0); #else r_.f32[0] = *mem_addr; r_.i32[1] = 0; r_.i32[2] = 0; r_.i32[3] = 0; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr))); #else simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr); r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr)) #endif /* The SSE documentation says that there are no alignment requirements for mem_addr. Unfortunately they used the __m64 type for the argument which is supposed to be 8-byte aligned, so some compilers (like clang with -Wcast-align) will generate a warning if you try to cast, say, a simde_float32* to a simde__m64* for this function. I think the choice of argument type is unfortunate, but I do think we need to stick to it here. 
If there is demand I can always add something like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */ SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vld1_f32( HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32)); #else simde__m64_private b_; simde_memcpy(&b_, mem_addr, sizeof(b_)); r_.i32[0] = b_.i32[0]; r_.i32[1] = b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadr_ps(mem_addr); #else simde__m128_private r_, v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr)); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrev64q_f32(v_.neon_f32); r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_reve(v_.altivec_f32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0); #else r_.f32[0] = v_.f32[3]; r_.f32[1] = v_.f32[2]; r_.f32[2] = v_.f32[1]; r_.f32[3] = v_.f32[0]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadu_ps(mem_addr); #else simde__m128_private r_; #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_load(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #else simde_memcpy(&r_, mem_addr, sizeof(r_)); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) _mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr)); #else simde__m64_private a_ = simde__m64_to_private(a), mask_ = simde__m64_to_private(mask); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) if (mask_.i8[i] < 0) mem_addr[i] = a_.i8[i]; #endif } #define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) # define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? 
a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b) # define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS) r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? 
a_.f32[i] : b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ps(a, b) simde_mm_max_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b) # define _m_pmaxub(a, b) simde_mm_max_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_max_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(maxq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ss(a, b) simde_mm_max_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b) # define _m_pminsw(a, b) simde_mm_min_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ps(a, b); #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128); #else r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128)); #endif return simde__m128_from_private(r_); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); #else r_.altivec_f32 = vec_sel(b_.altivec_f32, 
a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32)); #endif return simde__m128_from_private(r_); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) simde__m128 mask = simde_mm_cmplt_ps(a, b); return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ps(a, b) simde_mm_min_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminub(a, b) simde_mm_min_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b) # define _m_pminub(a, b) simde_mm_min_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_min_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ss(a, b) simde_mm_min_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movehl_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movehl_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a32 = vget_high_f32(a_.neon_f32); float32x2_t b32 = vget_high_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(b32, a32); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergel(b_.altivec_i64, a_.altivec_i64)); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3); #else r_.f32[0] = b_.f32[2]; r_.f32[1] = b_.f32[3]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movelh_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movelh_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a10 = vget_low_f32(a_.neon_f32); float32x2_t b10 = vget_low_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(a10, b10); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_mergeh(a_.altivec_i64, b_.altivec_i64)); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif 
  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif

/* simde_mm_movemask_pi8: collect the sign bit of each 8-bit lane into
 * the low 8 bits of an int. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  return _mm_movemask_pi8(a);
#else
  simde__m64_private a_ = simde__m64_to_private(a);
  int r = 0;

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    /* Isolate sign bits, shift lane i's bit into position i, then
     * horizontally add the eight bytes to form the mask. */
    uint8x8_t input = a_.neon_u8;
    const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
    const uint8x8_t mask_and = vdup_n_u8(0x80);
    const int8x8_t mask_shift = vld1_s8(xr);
    const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
    uint8x8_t lo = mask_result;
    r = vaddv_u8(lo);
  #else
    const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
    SIMDE_VECTORIZE_REDUCTION(|:r)
    for (size_t i = 0 ; i < nmemb ; i++) {
      r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
    }
  #endif

  return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
#  define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif

/* simde_mm_movemask_ps: 4-bit mask built from the sign bit of each float
 * lane.  NOTE(review): the native guard also requires
 * SIMDE_X86_MMX_NATIVE although _mm_movemask_ps itself needs only SSE --
 * presumably copy-pasted from the MMX variants; confirm upstream. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  return _mm_movemask_ps(a);
#else
  int r = 0;
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    static const int32_t shift_amount[] = { 0, 1, 2, 3 };
    const int32x4_t shift = vld1q_s32(shift_amount);
    uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
    return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    // Shift out everything but the sign bits with a 32-bit unsigned shift right.
    uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
    // Merge the two pairs together with a 64-bit unsigned shift right + add.
    uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
    // Extract the result.
    return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
  #else
    SIMDE_VECTORIZE_REDUCTION(|:r)
    for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
      r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
    }
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif

/* simde_mm_mul_ps: lane-wise single-precision multiply. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_mul_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.f32 = a_.f32 * b_.f32;
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = a_.f32[i] * b_.f32[i];
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif

/* simde_mm_mul_ss: multiply lane 0 only; lanes 1-3 pass through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.f32[0] = a_.f32[0] * b_.f32[0];
  r_.f32[1] = a_.f32[1];
  r_.f32[2] = a_.f32[2];
  r_.f32[3] = a_.f32[3];

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_mulhi_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16); const uint32x4_t t2 = vshrq_n_u32(t1, 16); const uint16x4_t t3 = vmovn_u32(t2); r_.neon_u16 = t3; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16))); } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b) # define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_prefetch (char const* p, int i) { #if defined(HEDLEY_GCC_VERSION) __builtin_prefetch(p); #else (void) p; #endif (void) i; } #if defined(SIMDE_X86_SSE_NATIVE) # define simde_mm_prefetch(p, i) _mm_prefetch(p, i) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_prefetch(p, i) simde_mm_prefetch(p, i) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_negate_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vnegq_f32(a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_VECTOR_NEGATE) r_.f32 
= -a_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = -a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip = vrecpeq_f32(a_.neon_f32); #if SIMDE_ACCURACY_PREFERENCE > 0 for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) { recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32)); } #endif r_.neon_f32 = recip; #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_re(a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.f32 = 1.0f / a_.f32; #elif defined(SIMDE_IEEE754_STORAGE) /* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */ SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { int32_t ix; simde_float32 fx = a_.f32[i]; simde_memcpy(&ix, &fx, sizeof(ix)); int32_t x = INT32_C(0x7EF311C3) - ix; simde_float32 temp; simde_memcpy(&temp, &x, sizeof(temp)); r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx); } #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = 1.0f / a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rcp_ps(a) simde_mm_rcp_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rcp_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); r_.f32[0] = 1.0f / a_.f32[0]; r_.f32[1] = a_.f32[1]; 
  /* Tail of simde_mm_rcp_ss: upper lanes pass through from a. */
  r_.f32[2] = a_.f32[2];
  r_.f32[3] = a_.f32[3];

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif

/* simde_mm_rsqrt_ps: approximate lane-wise reciprocal square root. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_rsqrt_ps(a);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
  #elif defined(SIMDE_IEEE754_STORAGE)
    /* Bit-trick ("fast inverse square root") with optional Newton-Raphson
     * refinement; magic constants vary with SIMDE_ACCURACY_PREFERENCE.
       https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
       Pages 100 - 103 */
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      #if SIMDE_ACCURACY_PREFERENCE <= 0
        r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
      #else
        simde_float32 x = a_.f32[i];
        simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
        int32_t ix;

        /* memcpy, not pointer casts, to avoid strict-aliasing UB. */
        simde_memcpy(&ix, &x, sizeof(ix));

        #if SIMDE_ACCURACY_PREFERENCE == 1
          ix = INT32_C(0x5F375A82) - (ix >> 1);
        #else
          ix = INT32_C(0x5F37599E) - (ix >> 1);
        #endif

        simde_memcpy(&x, &ix, sizeof(x));

        #if SIMDE_ACCURACY_PREFERENCE >= 2
          x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
        #endif
        x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);

        r_.f32[i] = x;
      #endif
    }
  #elif defined(simde_math_sqrtf)
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
    }
  #else
    HEDLEY_UNREACHABLE();
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif

/* simde_mm_rsqrt_ss: rsqrt of lane 0; lanes 1-3 pass through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* NOTE(review): accesses .neon_f32 directly on the simde__m128
     * returned by simde_mm_rsqrt_ps() instead of going through
     * simde__m128_to_private() -- relies on simde__m128 exposing that
     * member here; confirm. */
    r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0);
  #elif defined(SIMDE_IEEE754_STORAGE)
    {
      #if SIMDE_ACCURACY_PREFERENCE <= 0
        r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
      #else
        /* Same fast-inverse-sqrt trick as simde_mm_rsqrt_ps, lane 0 only. */
        simde_float32 x = a_.f32[0];
        simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
        int32_t ix;

        simde_memcpy(&ix, &x, sizeof(ix));

        #if SIMDE_ACCURACY_PREFERENCE == 1
          ix = INT32_C(0x5F375A82) - (ix >> 1);
        #else
          ix = INT32_C(0x5F37599E) - (ix >> 1);
        #endif

        simde_memcpy(&x, &ix, sizeof(x));

        #if SIMDE_ACCURACY_PREFERENCE >= 2
          x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
        #endif
        x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);

        r_.f32[0] = x;
      #endif
    }
    r_.f32[1] = a_.f32[1];
    r_.f32[2] = a_.f32[2];
    r_.f32[3] = a_.f32[3];
  #elif defined(simde_math_sqrtf)
    r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
    r_.f32[1] = a_.f32[1];
    r_.f32[2] = a_.f32[2];
    r_.f32[3] = a_.f32[3];
  #else
    HEDLEY_UNREACHABLE();
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif

/* simde_mm_sad_pu8: sum of absolute differences of the eight u8 lanes;
 * result in the low 16 bits, remaining lanes zeroed. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  return _mm_sad_pu8(a, b);
#else
  simde__m64_private
    r_,
    a_ = simde__m64_to_private(a),
    b_ = simde__m64_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* Absolute differences, pairwise-widen to u16, then total the four. */
    uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
    uint16_t r0 = t[0] + t[1] + t[2] + t[3];
    r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
  #else
    uint16_t sum = 0;

    #if defined(SIMDE_HAVE_STDLIB_H)
      SIMDE_VECTORIZE_REDUCTION(+:sum)
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
      }

      r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
      r_.i16[1] = 0;
      r_.i16[2] = 0;
      r_.i16[3] = 0;
    #else
      HEDLEY_UNREACHABLE();
    #endif
  #endif

  return simde__m64_from_private(r_);
#endif
}
#define
simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
#  define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif

/* simde_mm_set_ss: { a, 0, 0, 0 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
  return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_set_ss(a) simde_mm_set_ss(a)
#endif

/* simde_mm_setr_ps: set lanes in memory order (reversed argument order). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_setr_ps(e3, e2, e1, e0);
#else
  return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif

/* simde_mm_setzero_ps: all four lanes zero. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
  return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
  simde__m128 r;
  simde_memset(&r, 0, sizeof(r));
  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_setzero_ps() simde_mm_setzero_ps()
#endif

#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif

/* simde_mm_undefined_ps: contents unspecified; zeroed when the compiler
 * cannot be told to tolerate the deliberately-uninitialized read. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
  simde__m128_private r_;

#if defined(SIMDE_HAVE_UNDEFINED128)
  r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
  r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif

  return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif

/* simde_x_mm_setone_ps: all bits set in every lane (SIMDE extension). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
  simde__m128 t = simde_mm_setzero_ps();
  return simde_mm_cmpeq_ps(t, t);
}

/* simde_mm_sfence: store fence; non-x86 fallbacks use a full seq-cst
 * memory barrier (stronger than sfence, still correct). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
  /* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
  __atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
  #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
  #else
    atomic_thread_fence(memory_order_seq_cst);
  #endif
#elif defined(_MSC_VER)
  MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
  __sync_synchronize();
#elif defined(_OPENMP)
  #pragma omp critical(simde_mm_sfence_)
  { }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_sfence() simde_mm_sfence()
#endif

/* Pack four 2-bit lane selectors into an 8-bit immediate. */
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif

/* simde_mm_shuffle_pi16: select each 16-bit lane of the result from a
 * by the corresponding 2-bit field of imm8. */
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
#  define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#  define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
      const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
      simde__m64_from_private((simde__m64_private) { .i16 = \
        SIMDE_SHUFFLE_VECTOR_(16, 8, \
          (simde__tmp_a_).i16, \
          (simde__tmp_a_).i16, \
          (((imm8)     ) & 3), \
          (((imm8) >> 2) & 3), \
          (((imm8) >> 4) & 3), \
          (((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
  simde__m64_private r_;
  simde__m64_private a_ = simde__m64_to_private(a);

  for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
    r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
  }

HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
#  pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
  return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
#  define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
#  define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#  define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif

/* simde_mm_shuffle_ps: lanes 0-1 selected from a, lanes 2-3 from b,
 * per the 2-bit fields of imm8. */
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
#  define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && 0
/* NOTE(review): this branch is disabled via "&& 0" -- presumably
 * deliberate (kept for reference); confirm before re-enabling. */
#  define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
      simde__m128_from_private((simde__m128_private) { .f32 = \
        SIMDE_SHUFFLE_VECTOR_(32, 16, \
          simde__m128_to_private(a).f32, \
          simde__m128_to_private(b).f32, \
          (((imm8)     ) & 3), \
          (((imm8) >> 2) & 3), \
          (((imm8) >> 4) & 3) + 4, \
          (((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
    SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
  r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
  r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
  r_.f32[3] = b_.f32[(imm8 >> 6) & 3];

  return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vsqrtq_f32(a_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t est = vrsqrteq_f32(a_.neon_f32); for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) { est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est); } r_.neon_f32 = vmulq_f32(a_.neon_f32, est); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_sqrt(a_.altivec_f32); #elif defined(simde_math_sqrt) SIMDE_VECTORIZE for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) { r_.f32[i] = simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #elif defined(simde_math_sqrtf) r_.f32[0] = simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps(mem_addr, a); #else simde__m128_private a_ = 
  simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    vst1q_f32(mem_addr, a_.neon_f32);
  #elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE)
    vec_st(a_.altivec_f32, 0, mem_addr);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    wasm_v128_store(mem_addr, a_.wasm_v128);
  #else
    /* NOTE(review): sizeof(a) (the simde__m128 parameter) rather than
     * sizeof(a_); both appear to be 16 bytes, but confirm. */
    simde_memcpy(mem_addr, &a_, sizeof(a));
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* simde_mm_store1_ps: broadcast lane 0 of a into all four slots. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
  simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);

#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_store_ps1(mem_addr_, a);
#else
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    simde__m128_private tmp_;
    tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
    /* NOTE(review): passes tmp_.f32 (raw vector member) where
     * simde_mm_store_ps() takes a simde__m128 -- relies on the two types
     * coinciding when SIMDE_SHUFFLE_VECTOR_ is available; confirm. */
    simde_mm_store_ps(mem_addr_, tmp_.f32);
  #else
    SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
    for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
      mem_addr_[i] = a_.f32[0];
    }
  #endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#  define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* simde_mm_store_ss: store lane 0 only; no alignment requirement. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_store_ss(mem_addr, a);
#else
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
  #else
    *mem_addr = a_.f32[0];
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* simde_mm_storeh_pi: store the high two lanes of a into *mem_addr. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
  #else
    simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif

/* simde_mm_storel_pi: store the low two lanes of a into *mem_addr. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
  simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    dest_->neon_f32 = vget_low_f32(a_.neon_f32);
  #else
    dest_->f32[0] = a_.f32[0];
    dest_->f32[1] = a_.f32[1];
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif

/* simde_mm_storer_ps: store the four lanes in reversed order. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_storer_ps(mem_addr, a);
#else
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* Reverse within each 64-bit half, then swap the halves. */
    float32x4_t tmp = vrev64q_f32(a_.neon_f32);
    vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
    simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
  #else
    SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
    for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
      mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
    }
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* simde_mm_storeu_ps: unaligned four-float store. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_storeu_ps(mem_addr, a);
#else
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    vst1q_f32(mem_addr, a_.neon_f32);
  #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
    vec_vsx_st(a_.altivec_f32, 0, mem_addr);
  #else
    simde_memcpy(mem_addr, &a_, sizeof(a_));
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* simde_mm_sub_ps: lane-wise single-precision subtract. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_sub_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
  #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
    r_.f32 = a_.f32 - b_.f32;
  #else
    SIMDE_VECTORIZE
    for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
      r_.f32[i] = a_.f32[i] - b_.f32[i];
    }
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#
define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif

/* simde_mm_sub_ss: subtract lane 0 only; lanes 1-3 pass through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  r_.f32[0] = a_.f32[0] - b_.f32[0];
  r_.f32[1] = a_.f32[1];
  r_.f32[2] = a_.f32[2];
  r_.f32[3] = a_.f32[3];

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif

/* simde_mm_ucomieq_ss: quiet (non-signalling) lane-0 equality compare;
 * yields 1 when either operand is NaN (unordered). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomieq_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* x == x is false only for NaN; OR the "unordered" mask into the
     * equality result. */
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    /* Hold FP exceptions so a NaN compare stays quiet, as "ucomi" demands. */
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] == b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r = a_.f32[0] == b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif

/* simde_mm_ucomige_ss: quiet lane-0 >= compare; 0 when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomige_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
    uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] >= b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r = a_.f32[0] >= b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif

/* simde_mm_ucomigt_ss: quiet lane-0 > compare; 0 when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomigt_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
    uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] > b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r = a_.f32[0] > b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif

/* simde_mm_ucomile_ss: quiet lane-0 <= compare; 1 when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomile_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] <= b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r = a_.f32[0] <= b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif

/* simde_mm_ucomilt_ss: quiet lane-0 < compare; 1 when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomilt_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] < b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r = a_.f32[0] < b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif

/* simde_mm_ucomineq_ss: quiet lane-0 != compare; 0 when unordered. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomineq_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
    uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
    r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);
    r = a_.f32[0] != b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r =
        a_.f32[0] != b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif

/* Detect whether the native _mm_undefined_ps intrinsic is usable. */
#if defined(SIMDE_X86_SSE_NATIVE)
#  if defined(__has_builtin)
#    if __has_builtin(__builtin_ia32_undef128)
#      define SIMDE_HAVE_UNDEFINED128
#    endif
#  elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
#    define SIMDE_HAVE_UNDEFINED128
#  endif
#endif

#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif

/* simde_mm_unpackhi_ps: interleave the high halves: { a2, b2, a3, b3 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpackhi_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    float32x2_t a1 = vget_high_f32(a_.neon_f32);
    float32x2_t b1 = vget_high_f32(b_.neon_f32);
    float32x2x2_t result = vzip_f32(a1, b1);
    r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
  #else
    r_.f32[0] = a_.f32[2];
    r_.f32[1] = b_.f32[2];
    r_.f32[2] = a_.f32[3];
    r_.f32[3] = b_.f32[3];
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif

/* simde_mm_unpacklo_ps: interleave the low halves: { a0, b0, a1, b1 }. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpacklo_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
    r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    float32x2_t a1 = vget_low_f32(a_.neon_f32);
    float32x2_t b1 = vget_low_f32(b_.neon_f32);
    float32x2x2_t result = vzip_f32(a1, b1);
    r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
  #else
    r_.f32[0] = a_.f32[0];
    r_.f32[1] = b_.f32[0];
    r_.f32[2] = a_.f32[1];
    r_.f32[3] = b_.f32[1];
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif

/* simde_mm_stream_pi: non-temporal 64-bit store (plain store in fallback). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  _mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
  /* Note: the * binds to dest only -- a_ is a simde__m64_private value. */
  simde__m64_private* dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr), a_ = simde__m64_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
  #else
    dest->i64[0] = a_.i64[0];
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif

/* simde_mm_stream_ps: non-temporal 128-bit store (plain store in fallback). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  simde__m128_private a_ = simde__m128_to_private(a);
  __builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
  simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* 4x4 float matrix transpose: NEON uses vtrnq pairs; the generic path
 * uses the classic unpacklo/unpackhi + movelh/movehl sequence. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  #define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
      do { \
        float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
        float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
        row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
                            vget_low_f32(ROW23.val[0])); \
        row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
                            vget_low_f32(ROW23.val[1])); \
        row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
                            vget_high_f32(ROW23.val[0])); \
        row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
                            vget_high_f32(ROW23.val[1])); \
      } while (0)
#else
  #define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
      do { \
        simde__m128 tmp3, tmp2, tmp1, tmp0; \
        tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
        tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
        tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
        tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
        row0 = simde_mm_movelh_ps(tmp0, tmp2); \
        row1 = simde_mm_movehl_ps(tmp2, tmp0); \
        row2 = simde_mm_movelh_ps(tmp1, tmp3); \
        row3 = simde_mm_movehl_ps(tmp3, tmp1); \
      } while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif

/* MXCSR exception-flag bits; reuse the native values when available. */
#if defined(_MM_EXCEPT_INVALID)
#  define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
#  define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
#  define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
#  define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
#  define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
#  define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
#  define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
#  define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
#  define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
#  define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
#  define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
#  define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
#  define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
#  define SIMDE_MM_EXCEPT_MASK \
     (SIMDE_MM_EXCEPT_INVALID |
SIMDE_MM_EXCEPT_DENORM | \ SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \ SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT) #endif #if defined(_MM_MASK_INVALID) # define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID #else # define SIMDE_MM_MASK_INVALID (0x0080) #endif #if defined(_MM_MASK_DENORM) # define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM #else # define SIMDE_MM_MASK_DENORM (0x0100) #endif #if defined(_MM_MASK_DIV_ZERO) # define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO #else # define SIMDE_MM_MASK_DIV_ZERO (0x0200) #endif #if defined(_MM_MASK_OVERFLOW) # define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW #else # define SIMDE_MM_MASK_OVERFLOW (0x0400) #endif #if defined(_MM_MASK_UNDERFLOW) # define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW #else # define SIMDE_MM_MASK_UNDERFLOW (0x0800) #endif #if defined(_MM_MASK_INEXACT) # define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT #else # define SIMDE_MM_MASK_INEXACT (0x1000) #endif #if defined(_MM_MASK_MASK) # define SIMDE_MM_MASK_MASK _MM_MASK_MASK #else # define SIMDE_MM_MASK_MASK \ (SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \ SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \ SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT) #endif #if defined(_MM_FLUSH_ZERO_MASK) # define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK #else # define SIMDE_MM_FLUSH_ZERO_MASK (0x8000) #endif #if defined(_MM_FLUSH_ZERO_ON) # define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON #else # define SIMDE_MM_FLUSH_ZERO_ON (0x8000) #endif #if defined(_MM_FLUSH_ZERO_OFF) # define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF #else # define SIMDE_MM_FLUSH_ZERO_OFF (0x0000) #endif SIMDE_END_DECLS_ HEDLEY_DIAGNOSTIC_POP #endif /* !defined(SIMDE_X86_SSE_H) */
omp_for_schedule_static.c
<ompts:test> <ompts:testdescription>Test which checks the static option of the omp for schedule directive.</ompts:testdescription> <ompts:ompversion>2.0</ompts:ompversion> <ompts:directive>omp for schedule(static)</ompts:directive> <ompts:dependences>omp for nowait,omp flush,omp critical,omp single</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" //#define NUMBER_OF_THREADS 10 #define CFSMAX_SIZE 1000 #define MAX_TIME 0.01 #ifdef SLEEPTIME #undef SLEEPTIME #define SLEEPTIME 0.0005 #endif int <ompts:testcode:functionname>omp_for_schedule_static</ompts:testcode:functionname> (FILE * logFile) { int threads; int i,lasttid; int NUMBER_OF_THREADS; <ompts:orphan:vars> int * tids; int notout; int maxiter; int chunk_size; </ompts:orphan:vars> int counter = 0; int tmp_count=1; int lastthreadsstarttid = -1; int result = 1; chunk_size = 7; tids = (int *) malloc (sizeof (int) * (CFSMAX_SIZE + 1)); notout = 1; maxiter = 0; #pragma omp parallel shared(tids,counter) { /* begin of parallel*/ #pragma omp single { threads = omp_get_num_threads (); NUMBER_OF_THREADS = threads; } /* end of single */ } /* end of parallel */ if (threads < 2) { printf ("This test only works with at least two threads"); fprintf (logFile,"This test only works with at least two threads"); return 0; } else { fprintf (logFile,"Using an internal count of %d\nUsing a specified chunksize of %d\n", CFSMAX_SIZE, chunk_size); tids[CFSMAX_SIZE] = -1; /* setting endflag */ #pragma omp parallel shared(tids) { /* begin of parallel */ <ompts:orphan> double count; int tid; int j; tid = omp_get_thread_num (); #pragma omp for nowait <ompts:check>schedule(static,chunk_size)</ompts:check> for(j = 0; j < CFSMAX_SIZE; ++j) { count = 0.; #pragma omp flush(maxiter) if (j > maxiter) { #pragma omp critical { maxiter = j; } /* end of critical */ } /*printf ("thread %d sleeping\n", tid);*/ while (notout && (count < MAX_TIME) && 
(maxiter == j)) { #pragma omp flush(maxiter,notout) my_sleep (SLEEPTIME); count += SLEEPTIME; printf("."); } #ifdef VERBOSE if (count > 0.) printf(" waited %lf s\n", count); #endif /*printf ("thread %d awake\n", tid);*/ tids[j] = tid; #ifdef VERBOSE printf("%d finished by %d\n",j,tid); #endif } /* end of for */ notout = 0; #pragma omp flush(maxiter,notout) </ompts:orphan> } /* end of parallel */ /**** analysing the data in array tids ****/ lasttid = tids[0]; tmp_count = 0; for (i = 0; i < CFSMAX_SIZE + 1; ++i) { /* If the work was done by the same thread increase tmp_count by one. */ if (tids[i] == lasttid) { tmp_count++; #ifdef VERBOSE fprintf (logFile, "%d: %d \n", i, tids[i]); #endif continue; } /* Check if the next thread had has the right thread number. When finding * threadnumber -1 the end should be reached. */ if (tids[i] == (lasttid + 1) % threads || tids[i] == -1) { /* checking for the right chunk size */ if (tmp_count == chunk_size) { tmp_count = 1; lasttid = tids[i]; #ifdef VERBOSE fprintf (logFile, "OK\n"); #endif } /* If the chunk size was wrong, check if the end was reached */ else { if (tids[i] == -1) { if (i == CFSMAX_SIZE) { fprintf (logFile, "Last thread had chunk size %d\n", tmp_count); break; } else { fprintf (logFile, "ERROR: Last thread (thread with number -1) was found before the end.\n"); result = 0; } } else { fprintf (logFile, "ERROR: chunk size was %d. (assigned was %d)\n", tmp_count, chunk_size); result = 0; } } } else { fprintf(logFile, "ERROR: Found thread with number %d (should be inbetween 0 and %d).", tids[i], threads - 1); result = 0; } #ifdef VERBOSE fprintf (logFile, "%d: %d \n", i, tids[i]); #endif } } return result; } </ompts:testcode> </ompts:test>
lab4_parallel.c
#include <stdio.h> #include "data.h" #include "time.h" int main(int argc, char **argv) { N = (int) strtol(argv[1], NULL, 10); init(); markStartTime(); #pragma omp parallel for for (int i = 0; i < N; ++i) { for (int j = 0; j < N; ++j) { mat3[i][j] = 0; for (int k = 0; k < N; ++k) { mat3[i][j] += mat1[i][k] * mat2[k][j]; } } } markStopTime(); cleanup(); printf("%d\n", getMillis()); return 0; }
cuda_utils.h
/*! * Modifications Copyright 2017 H2O.ai, Inc. */ #ifndef _CUDA_UTILS_H #define _CUDA_UTILS_H #include "cuda_utils2.h" int checkwDev(int wDev){ #ifdef DEBUG int nVis = 0; #pragma omp critical { CUDACHECK(cudaGetDeviceCount(&nVis)); } #ifdef DEBUG for (int i = 0; i < nVis; i++){ cudaDeviceProp props; CUDACHECK(cudaGetDeviceProperties(&props, i)); printf("Visible: Compute %d.%d CUDA device: [%s] : cudadeviceid: %2d of %2d devices [0x%02x] mpc=%d\n", props.major, props.minor, props.name, i\ , nVis, props.pciBusID, props.multiProcessorCount); fflush(stdout); } #endif if(wDev>nVis-1){ fprintf(stderr,"Not enough GPUs, where wDev=%d and nVis=%d\n",wDev,nVis); exit(1); return(1); } else return(0); #else return(0); #endif } #endif
GB_unaryop__identity_int8_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_int8_uint8
// op(A') function: GB_tran__identity_int8_uint8

// C type: int8_t
// A type: uint8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij

// NOTE(review): the uint8_t -> int8_t cast is implementation-defined in C
// for values > 127 (it wraps modulo 256 on all common platforms) -- this is
// presumably the intended GraphBLAS typecast behavior; confirm against the
// GraphBLAS typecasting rules.

// type of the A matrix entries
#define GB_ATYPE \
    uint8_t

// type of the C matrix entries
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// access the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the already-cast input)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = (int8_t) Ax [p] for p in [0, anz).  Embarrassingly
// parallel; each OpenMP thread writes a disjoint static chunk of Cx.
GrB_Info GB_unop__identity_int8_uint8
(
    int8_t *Cx,         // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__identity_int8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rar_common.c
/* * This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #include "misc.h" // error() static int omp_t = 1; static unsigned char *saved_salt; static unsigned char *saved_key; static int (*cracked); static unpack_data_t (*unpack_data); static unsigned int *saved_len; static unsigned char *aes_key; static unsigned char *aes_iv; #define FORMAT_TAG "$RAR3$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) /* cRARk use 4-char passwords for CPU benchmark */ static struct fmt_tests cpu_tests[] = { {"$RAR3$*0*b109105f5fe0b899*d4f96690b1a8fe1f120b0290a85a2121", "test"}, {"$RAR3$*0*42ff7e92f24fb2f8*9d8516c8c847f1b941a0feef064aaf0d", "1234"}, {"$RAR3$*0*56ce6de6ddee17fb*4c957e533e00b0e18dfad6accc490ad9", "john"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, {"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ 
{"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, {"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, 
{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"}, {"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"}, {"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"}, {"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"}, {"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"}, {"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"}, {"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"}, {"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"}, {"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"}, {"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"}, {"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"}, {"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"}, {"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"}, 
{"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"}, {"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"}, {"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"}, {"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"}, {"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"}, {"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"}, {"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"}, {"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"}, {"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"}, {"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"}, #endif {NULL} }; #ifdef RAR_OPENCL_FORMAT /* cRARk use 5-char passwords for GPU benchmark */ static struct fmt_tests gpu_tests[] = { {"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"}, {"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"}, {"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"}, {"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"}, {"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, {"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG {"$RAR3$*0*af24c0c95e9cafc7*e7f207f30dec96a5ad6f917a69d0209e", "magnum"}, {"$RAR3$*0*2653b9204daa2a8e*39b11a475f486206e2ec6070698d9bbc", "123456"}, {"$RAR3$*0*63f1649f16c2b687*8a89f6453297bcdb66bd756fa10ddd98", "abc123"}, /* -p mode tests, -m0 and -m3 (in that order) */ 
{"$RAR3$*1*575b083d78672e85*965f1453*48*47*1*cd3d8756438f43ab70e668792e28053f0ad7449af1c66863e3e55332bfa304b2c082b9f23b36cd4a8ebc0b743618c5b2*30", "magnum"}, {"$RAR3$*1*6f5954680c87535a*965f1453*64*47*1*c9bb398b9a5d54f035fd22be54bc6dc75822f55833f30eb4fb8cc0b8218e41e6d01824e3467475b90b994a5ddb7fe19366d293c9ee305316c2a60c3a7eb3ce5a*33", "magnum"}, /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ {"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, 
{"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, {"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"}, {"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"}, 
{"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"}, {"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"}, {"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"}, {"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"}, {"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"}, {"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"}, {"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"}, {"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"}, {"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"}, {"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"}, {"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"}, {"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"}, {"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"}, {"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"}, {"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"}, {"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"}, #endif {NULL} }; #endif typedef struct { dyna_salt dsalt; /* must be first. allows dyna_salt to work */ /* place all items we are NOT going to use for salt comparison, first */ unsigned char *blob; /* data from this point on, is part of the salt for compare reasons */ unsigned char salt[8]; int type; /* 0 = -hp, 1 = -p */ /* for rar -p mode only: */ union { unsigned int w; unsigned char c[4]; } crc; unsigned long long pack_size; unsigned long long unp_size; int method; unsigned char blob_hash[20]; // holds an sha1, but could be 'any' hash. 
// raw_data should be word aligned, and 'ok' unsigned char raw_data[1]; } rarfile; static rarfile *cur_file; #undef set_key static void set_key(char *key, int index) { int plen; UTF16 buf[PLAINTEXT_LENGTH + 1]; /* UTF-16LE encode the password, encoding aware */ plen = enc_to_utf16(buf, PLAINTEXT_LENGTH, (UTF8*) key, strlen(key)); if (plen < 0) plen = strlen16(buf); memcpy(&saved_key[UNICODE_LENGTH * index], buf, UNICODE_LENGTH); saved_len[index] = plen << 1; #ifdef RAR_OPENCL_FORMAT new_keys = 1; #endif } static void *get_salt(char *ciphertext) { unsigned int i, type, ex_len; static unsigned char *ptr; /* extract data from "salt" */ char *encoded_salt; char *saltcopy = strdup(ciphertext); char *keep_ptr = saltcopy; rarfile *psalt; unsigned char tmp_salt[8]; int inlined = 1; SHA_CTX ctx; if (!ptr) ptr = mem_alloc_tiny(sizeof(rarfile*),sizeof(rarfile*)); saltcopy += FORMAT_TAG_LEN; /* skip over "$RAR3$*" */ type = atoi(strtokm(saltcopy, "*")); encoded_salt = strtokm(NULL, "*"); for (i = 0; i < 8; i++) tmp_salt[i] = atoi16[ARCH_INDEX(encoded_salt[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_salt[i * 2 + 1])]; if (type == 0) { /* rar-hp mode */ char *encoded_ct = strtokm(NULL, "*"); psalt = mem_calloc(1, sizeof(*psalt)+16); psalt->type = type; ex_len = 16; memcpy(psalt->salt, tmp_salt, 8); for (i = 0; i < 16; i++) psalt->raw_data[i] = atoi16[ARCH_INDEX(encoded_ct[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_ct[i * 2 + 1])]; psalt->blob = psalt->raw_data; psalt->pack_size = 16; } else { char *p = strtokm(NULL, "*"); char crc_c[4]; unsigned long long pack_size; unsigned long long unp_size; for (i = 0; i < 4; i++) crc_c[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; pack_size = atoll(strtokm(NULL, "*")); unp_size = atoll(strtokm(NULL, "*")); inlined = atoi(strtokm(NULL, "*")); ex_len = pack_size; /* load ciphertext. We allocate and load all files here, and they are freed when password found. 
*/ #if HAVE_MMAP psalt = mem_calloc(1, sizeof(*psalt) + (inlined ? ex_len : 0)); #else psalt = mem_calloc(1, sizeof(*psalt) + ex_len); #endif psalt->type = type; memcpy(psalt->salt, tmp_salt, 8); psalt->pack_size = pack_size; psalt->unp_size = unp_size; memcpy(psalt->crc.c, crc_c, 4); if (inlined) { unsigned char *d = psalt->raw_data; p = strtokm(NULL, "*"); for (i = 0; i < psalt->pack_size; i++) *d++ = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; psalt->blob = psalt->raw_data; } else { FILE *fp; char *archive_name = strtokm(NULL, "*"); long long pos = atoll(strtokm(NULL, "*")); #if HAVE_MMAP if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno)); error(); } #ifdef DEBUG fprintf(stderr, "RAR mmap() len "LLu" offset 0\n", pos + psalt->pack_size); #endif psalt->blob = mmap(NULL, pos + psalt->pack_size, PROT_READ, MAP_SHARED, fileno(fp), 0); if (psalt->blob == MAP_FAILED) { fprintf(stderr, "Error loading file from " "archive '%s'. Archive possibly " "damaged.\n", archive_name); error(); } psalt->blob += pos; #else size_t count; if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno)); error(); } jtr_fseek64(fp, pos, SEEK_SET); count = fread(psalt->raw_data, 1, psalt->pack_size, fp); if (count != psalt->pack_size) { fprintf(stderr, "Error loading file from archive '%s', expected "LLu" bytes, got "Zu". 
Archive possibly damaged.\n", archive_name, psalt->pack_size, count); error(); } psalt->blob = psalt->raw_data; #endif fclose(fp); } p = strtokm(NULL, "*"); psalt->method = atoi16[ARCH_INDEX(p[0])] * 16 + atoi16[ARCH_INDEX(p[1])]; if (psalt->method != 0x30) #if ARCH_LITTLE_ENDIAN psalt->crc.w = ~psalt->crc.w; #else psalt->crc.w = JOHNSWAP(~psalt->crc.w); #endif } SHA1_Init(&ctx); SHA1_Update(&ctx, psalt->blob, psalt->pack_size); SHA1_Final(psalt->blob_hash, &ctx); MEM_FREE(keep_ptr); #if HAVE_MMAP psalt->dsalt.salt_alloc_needs_free = inlined; #else psalt->dsalt.salt_alloc_needs_free = 1; #endif psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(rarfile, salt); psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(rarfile, salt, raw_data, 0); memcpy(ptr, &psalt, sizeof(rarfile*)); return (void*)ptr; } static void set_salt(void *salt) { cur_file = *((rarfile**)salt); memcpy(saved_salt, cur_file->salt, 8); #ifdef RAR_OPENCL_FORMAT HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, 8, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt"); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *ptr, *keeptr; int mode, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; if (!(ctcopy = strdup(ciphertext))) { fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL); return 0; } keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if (!(ptr = strtokm(ctcopy, "*"))) /* -p or -h mode */ goto error; if (strlen(ptr) != 1 || !isdec(ptr)) goto error; mode = atoi(ptr); if (mode > 1) goto error; if (!(ptr = strtokm(NULL, "*"))) /* salt */ goto error; if (hexlenl(ptr, &extra) != 16 || extra) /* 8 bytes of salt */ goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (mode == 0) { if (hexlenl(ptr, &extra) != 32 || extra) /* 16 bytes of encrypted known plain */ goto error; MEM_FREE(keeptr); return 1; } else { int inlined; long long plen, ulen; if (hexlenl(ptr, &extra) != 8 || 
extra) /* 4 bytes of CRC */ goto error; if (!(ptr = strtokm(NULL, "*"))) /* pack_size */ goto error; if (strlen(ptr) > 12) { // pack_size > 1 TB? Really? static int warn_once_pack_size = 1; if (warn_once_pack_size) { fprintf(stderr, "pack_size > 1TB not supported (%s)\n", FORMAT_NAME); warn_once_pack_size = 0; } goto error; } if ((plen = atoll(ptr)) < 16) goto error; if (!(ptr = strtokm(NULL, "*"))) /* unp_size */ goto error; if (strlen(ptr) > 12) { static int warn_once_unp_size = 1; if (warn_once_unp_size) { fprintf(stderr, "unp_size > 1TB not supported (%s)\n", FORMAT_NAME); warn_once_unp_size = 0; } goto error; } if ((ulen = atoll(ptr)) < 1) goto error; if (!(ptr = strtokm(NULL, "*"))) /* inlined */ goto error; if (strlen(ptr) != 1 || !isdec(ptr)) goto error; inlined = atoi(ptr); if (inlined > 1) goto error; if (!(ptr = strtokm(NULL, "*"))) /* pack_size / archive_name */ goto error; if (inlined) { if (hexlenl(ptr, &extra) != plen * 2 || extra) goto error; } else { FILE *fp; char *archive_name; archive_name = ptr; if (!(fp = fopen(archive_name, "rb"))) { if (!ldr_in_pot) fprintf(stderr, "! 
%s: %s, skipping.\n", archive_name, strerror(errno)); goto error; } if (!(ptr = strtokm(NULL, "*"))) /* pos */ goto error; /* We could go on and actually try seeking to pos but this is enough for now */ fclose(fp); } if (!(ptr = strtokm(NULL, "*"))) /* method */ goto error; } MEM_FREE(keeptr); return 1; error: #ifdef RAR_DEBUG { char buf[68]; strnzcpy(buf, ciphertext, sizeof(buf)); fprintf(stderr, "rejecting %s\n", buf); } #endif MEM_FREE(keeptr); return 0; } static char *get_key(int index) { UTF16 tmpbuf[PLAINTEXT_LENGTH + 1]; memcpy(tmpbuf, &((UTF16*) saved_key)[index * PLAINTEXT_LENGTH], saved_len[index]); memset(&tmpbuf[saved_len[index] >> 1], 0, 2); return (char*) utf16_to_enc(tmpbuf); } #define ADD_BITS(n) \ { \ if (bits < 9) { \ hold |= ((unsigned int)*next++ << (24 - bits)); \ bits += 8; \ } \ hold <<= n; \ bits -= n; \ } /* * This function is loosely based on JimF's check_inflate_CODE2() from * pkzip_fmt. Together with the other bit-checks, we are rejecting over 96% * of the candidates without resorting to a slow full check (which in turn * may reject semi-early, especially if it's a PPM block) * * Input is first 16 bytes of RAR buffer decrypted, as-is. It also contain the * first 2 bits, which have already been decoded, and have told us we had an * LZ block (RAR always use dynamic Huffman table) and keepOldTable was not set. * * RAR use 20 x (4 bits length, optionally 4 bits zerocount), and reversed * byte order. 
*/ static MAYBE_INLINE int check_huffman(unsigned char *next) { unsigned int bits, hold, i; int left; unsigned int ncount[4]; unsigned char *count = (unsigned char*)ncount; unsigned char bit_length[20]; #ifdef DEBUG unsigned char *was = next; #endif #if ARCH_LITTLE_ENDIAN && ARCH_ALLOWS_UNALIGNED hold = JOHNSWAP(*(unsigned int*)next); #else hold = next[3] + (((unsigned int)next[2]) << 8) + (((unsigned int)next[1]) << 16) + (((unsigned int)next[0]) << 24); #endif next += 4; // we already have the first 32 bits hold <<= 2; // we already processed 2 bits, PPM and keepOldTable bits = 32 - 2; /* First, read 20 pairs of (bitlength[, zerocount]) */ for (i = 0 ; i < 20 ; i++) { int length, zero_count; length = hold >> 28; ADD_BITS(4); if (length == 15) { zero_count = hold >> 28; ADD_BITS(4); if (zero_count == 0) { bit_length[i] = 15; } else { zero_count += 2; while (zero_count-- > 0 && i < sizeof(bit_length) / sizeof(bit_length[0])) bit_length[i++] = 0; i--; } } else { bit_length[i] = length; } } #ifdef DEBUG if (next - was > 16) { fprintf(stderr, "*** (possible) BUG: check_huffman() needed %u bytes, we only have 16 (bits=%d, hold=0x%08x)\n", (int)(next - was), bits, hold); dump_stuff_msg("complete buffer", was, 16); error(); } #endif /* Count the number of codes for each code length */ memset(count, 0, 16); for (i = 0; i < 20; i++) { ++count[bit_length[i]]; } count[0] = 0; if (!ncount[0] && !ncount[1] && !ncount[2] && !ncount[3]) return 0; /* No codes at all */ left = 1; for (i = 1; i < 16; ++i) { left <<= 1; left -= count[i]; if (left < 0) { return 0; /* over-subscribed */ } } if (left) { return 0; /* incomplete set */ } return 1; /* Passed this check! 
*/ } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static inline void check_rar(int count) { unsigned int index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { AES_KEY aes_ctx; unsigned char *key = &aes_key[index * 16]; unsigned char *iv = &aes_iv[index * 16]; AES_set_decrypt_key(key, 128, &aes_ctx); /* AES decrypt, uses aes_iv, aes_key and blob */ if (cur_file->type == 0) { /* rar-hp mode */ unsigned char plain[16]; AES_cbc_encrypt(cur_file->blob, plain, 16, &aes_ctx, iv, AES_DECRYPT); cracked[index] = !memcmp(plain, "\xc4\x3d\x7b\x00\x40\x07\x00", 7); } else { if (cur_file->method == 0x30) { /* stored, not deflated */ CRC32_t crc; unsigned char crc_out[4]; unsigned char plain[0x8000]; unsigned long long size = cur_file->unp_size; unsigned char *cipher = cur_file->blob; /* Use full decryption with CRC check. Compute CRC of the decompressed plaintext */ CRC32_Init(&crc); while (size) { unsigned int inlen = (size > 0x8000) ? 0x8000 : size; AES_cbc_encrypt(cipher, plain, inlen, &aes_ctx, iv, AES_DECRYPT); CRC32_Update(&crc, plain, inlen); size -= inlen; cipher += inlen; } CRC32_Final(crc_out, crc); /* Compare computed CRC with stored CRC */ cracked[index] = !memcmp(crc_out, &cur_file->crc.c, 4); } else { const int solid = 0; unpack_data_t *unpack_t; unsigned char plain[20]; unsigned char pre_iv[16]; cracked[index] = 0; memcpy(pre_iv, iv, 16); /* Decrypt just one block for early rejection */ AES_cbc_encrypt(cur_file->blob, plain, 16, &aes_ctx, pre_iv, AES_DECRYPT); /* Early rejection */ if (plain[0] & 0x80) { // PPM checks here. if (!(plain[0] & 0x20) || // Reset bit must be set (plain[1] & 0x80)) // MaxMB must be < 128 goto bailOut; } else { // LZ checks here. 
if ((plain[0] & 0x40) || // KeepOldTable can't be set !check_huffman(plain)) // Huffman table check goto bailOut; } /* Reset stuff for full check */ AES_set_decrypt_key(key, 128, &aes_ctx); #ifdef _OPENMP unpack_t = &unpack_data[omp_get_thread_num()]; #else unpack_t = unpack_data; #endif unpack_t->max_size = cur_file->unp_size; unpack_t->dest_unp_size = cur_file->unp_size; unpack_t->pack_size = cur_file->pack_size; unpack_t->iv = iv; unpack_t->ctx = &aes_ctx; unpack_t->key = key; if (rar_unpack29(cur_file->blob, solid, unpack_t)) cracked[index] = !memcmp(&unpack_t->unp_crc, &cur_file->crc.c, 4); bailOut:; } } } }
panama_fmt_plug.c
/* Panama cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

/* Standard JtR plugin preamble: the same file is included three times, once
 * to declare the format, once to register it, and once for the code body. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_panama_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_panama_);
#else

#include <string.h>

#include "arch.h"
#include "sph_panama.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 1 - 217k
// 64 - 1930k
// 128 - 2099k
// 256 - 2204k *** set to this level
// 512 - 2203k
// 1k - 2124k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 8
#else
#define OMP_SCALE 256
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP

#include "memdbg.h"

#define FORMAT_LABEL "Panama"
#define FORMAT_NAME ""
#define FORMAT_TAG "$panama$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "Panama 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 32
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* Each digest is listed twice, bare and with the $panama$ tag, so both
 * accepted input forms are exercised by the self test. */
static struct fmt_tests panama__tests[] = {
	{"049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"$panama$049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"$panama$a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"$panama$017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{"$panama$3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{NULL}
};

static char (*saved_key)[((PLAINTEXT_LENGTH + 4)/4)*4];	// PANAMA implementation expects 32-bit alignment
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

/* Allocate per-candidate key and digest buffers, scaled up for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
	                       self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
	                       self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Accept exactly 64 hex digits, with or without the $panama$ tag. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (hexlenl(p, &extra) != BINARY_SIZE*2 || extra)
		return 0;

	return 1;
}

/* Canonicalize a hash: always return the tagged form. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + BINARY_SIZE * 2 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE * 2 + 1);
	return out;
}

/* Decode the hex digest; split() guarantees the tag is present by the time
 * this is called. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p = ciphertext + TAG_LENGTH;
	int i;

	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

/* Hash all candidate keys.  Without _OPENMP the loop header is compiled out
 * and only index 0 is processed (max_keys_per_crypt is 1 in that build). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_panama_context ctx;

		sph_panama_init(&ctx);
		sph_panama(&ctx, saved_key[index],
		           strlen(saved_key[index]));
		sph_panama_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Fast partial compare: only the first ARCH_SIZE bytes of each digest;
 * cmp_one() below checks the full BINARY_SIZE. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void panama_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_panama_ = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		panama__tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		panama_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
hello_openMP.c
#include <stdio.h>
#include <omp.h>

/* Minimal OpenMP demo: every thread in the default team prints one greeting. */
int main(void)
{
	/* The body of the parallel region runs once per thread. */
#pragma omp parallel
	{
		printf("Hello World!\n");
	}

	return 0;
}
TaskDispatcher.h
#include "nvtt.h"

// Task dispatcher back-ends for nvtt.  The typedef at the bottom of this
// header aliases ConcurrentTaskDispatcher to the first available runtime
// (OpenMP, then TBB, then PPL, then GCD), falling back to sequential.
//
// NOTE(review): this header has no include guard / #pragma once -- confirm
// it is never included twice in the same translation unit.

// OpenMP
// http://en.wikipedia.org/wiki/OpenMP
#if defined(HAVE_OPENMP)
#include <omp.h>
#endif

// Gran Central Dispatch (GCD/libdispatch)
// http://developer.apple.com/mac/library/documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
#if NV_OS_DARWIN && defined(HAVE_DISPATCH_H)
//#define HAVE_GCD 1
//#include <dispatch/dispatch.h>
#endif

// Parallel Patterns Library (PPL) is part of Microsoft's concurrency runtime:
// http://msdn.microsoft.com/en-us/library/dd504870.aspx
// NOTE(review): <ppl.h> is included although the HAVE_PPL define above it is
// commented out -- presumably HAVE_PPL comes from the build system; verify.
#if NV_OS_WIN32 && _MSC_VER >= 1600
//#define HAVE_PPL 1
#include <ppl.h>
#endif

// Intel Thread Building Blocks (TBB).
// http://www.threadingbuildingblocks.org/
#if defined(HAVE_TBB)
#include <tbb/parallel_for.h>
#endif

#include "nvthread/ParallelFor.h"

namespace nvtt {

    // Fallback dispatcher: runs all count tasks in order on the calling thread.
    struct SequentialTaskDispatcher final : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) override final {
            for (int i = 0; i < count; i++) {
                task(context, i);
            }
        }
    };

#if defined(HAVE_OPENMP)
    // Dispatches the count tasks across the default OpenMP thread team.
    struct OpenMPTaskDispatcher final : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) override final {
            #pragma omp parallel for
            for (int i = 0; i < count; i++) {
                task(context, i);
            }
        }
    };
#endif

#if HAVE_GCD
    // Task dispatcher using Apple's Grand Central Dispatch.
    struct AppleTaskDispatcher final : public TaskDispatcher {
        // @@ This is really lame, but I refuse to use size_t in the public API.
        struct BlockContext {
            Task * task;
            void * context;
        };

        // Trampoline matching dispatch_apply_f's (void *, size_t) callback
        // signature; narrows the iteration index back to int for Task.
        static void block(void * context, size_t id) {
            BlockContext * ctx = (BlockContext *)context;
            ctx->task(ctx->context, int(id));
        }

        virtual void dispatch(Task * task, void * context, int count) {
            dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
            BlockContext blockCtx = { task, context };
            dispatch_apply_f(count, q, &blockCtx, block);
        }
    };
#endif

#if defined(HAVE_PPL)
    // Adapts a (Task, context) pair to PPL's callable interface.
    // NOTE(review): a struct with this same name is also defined in the
    // HAVE_TBB section below; if both runtimes are ever enabled together this
    // is a redefinition inside namespace nvtt -- confirm they are exclusive.
    struct TaskFunctor {
        TaskFunctor(Task * task, void * context) : task(task), context(context) {}
        void operator()(int n) const {
            task(context, n);
        }
        Task * task;
        void * context;
    };

    // Task dispatcher using Microsoft's concurrency runtime.
    struct MicrosoftTaskDispatcher final : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            TaskFunctor func(task, context);
            Concurrency::parallel_for(0, count, func);
        }
    };
#endif

#if defined(HAVE_TBB)
    // Adapts a (Task, context) pair to TBB's callable interface.
    // NOTE(review): tbb::parallel_for over a blocked_range invokes
    // operator()(const blocked_range<int> &), not operator()(int &), and both
    // parallel_for and blocked_range are used unqualified below -- verify
    // this section actually compiles when HAVE_TBB is enabled.
    struct TaskFunctor {
        TaskFunctor(Task * task, void * context) : task(task), context(context) {}
        void operator()(int & n) const {
            task(context, n);
        }
        Task * task;
        void * context;
    };

    // Task dispatcher using Intel's Thread Building Blocks.
    struct IntelTaskDispatcher final : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            parallel_for(blocked_range<int>(0, count, 1), TaskFunctor(task, context));
        }
    };
#endif

    // Select the best available back-end, in fixed preference order.
#if defined(HAVE_OPENMP)
    typedef OpenMPTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_TBB)
    typedef IntelTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_PPL)
    typedef MicrosoftTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_GCD)
    typedef AppleTaskDispatcher ConcurrentTaskDispatcher;
#else
    typedef SequentialTaskDispatcher ConcurrentTaskDispatcher;
    //typedef ParallelTaskDispatcher ConcurrentTaskDispatcher;
#endif

} // namespace nvtt
integral_atomic.c
#include <stdio.h>
#include <omp.h>

#define NUM_THREADS 4

/* Number of rectangles used by the midpoint-rule integration. */
static long num_steps = 100000;
double step;

/*
 * Approximate pi by integrating 4/(1+x^2) over [0,1] with the midpoint
 * rule.  Each OpenMP thread accumulates a private partial sum over a
 * round-robin subset of the steps, then folds it into the shared total
 * under "#pragma omp atomic", so there is no reduction race.  Prints the
 * result and the elapsed wall-clock time.
 *
 * Fixes over the previous revision: the unused outer loop variable and the
 * dead "nthreads" store (written by thread 0 inside the parallel region
 * but never read) are removed, and main now returns 0 explicitly.
 */
int main(void)
{
	double pi = 0.0, init_time, finish_time;

	step = 1.0 / (double)num_steps;

	init_time = omp_get_wtime();
	omp_set_num_threads(NUM_THREADS);

#pragma omp parallel
	{
		int i, id, nthrds;
		double x, sum = 0.0;

		id = omp_get_thread_num();
		nthrds = omp_get_num_threads();

		/* Cyclic distribution: thread id takes steps id, id+nthrds, ... */
		for (i = id; i < num_steps; i = i + nthrds) {
			x = (i + 0.5) * step;
			sum += 4.0 / (1.0 + x * x);
		}
		sum *= step;

		/* One atomic accumulation per thread keeps contention minimal. */
#pragma omp atomic
		pi += sum;
	}
	finish_time = omp_get_wtime() - init_time;

	printf("PI = %f\n", pi);
	printf("Time = %f\n", finish_time);

	return 0;
}
GB_unaryop__lnot_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_bool_fp64
// op(A') function: GB_tran__lnot_bool_fp64

// C type: bool
// A type: double
// cast: bool cij = (bool) aij
// unaryop: cij = !aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !x ;

// casting
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_bool_fp64
(
    bool *Cx,                   // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Element-wise: iteration p touches only Cx [p] and Ax [p], so the
    // static schedule is race-free even when Cx aliases Ax.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_bool_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose loop itself lives in the shared template included below;
    // the GB_* macros above configure it for this type/operator pair.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
drupal7_fmt_plug.c
/* * Drupal 7 phpass variant using SHA-512 and hashes cut at 258 bits. * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * These are 8 byte salted hashes with a loop count that defines the number * of loops to compute. Drupal uses 258 bits of the hash, this is a multiple of * 6 but not 8. I presume this is for getting unpadded base64. Anyway we store * an extra byte but for now we will only compare 256 bits. I doubt that will * pose any problems. Actually I'm not quite sure the last bits end up correct * from the current version of get_binary(). * * Based on [old thick] phpass-md5. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_drupal7; #elif FMT_REGISTERS_H john_register_one(&fmt_drupal7); #else #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "Drupal7" #define FORMAT_NAME "$S$" #define FORMAT_TAG "$S$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME #define BENCHMARK_COMMENT " (x16385)" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 47 #define CIPHERTEXT_LENGTH 55 #define DIGEST_SIZE (512/8) #define BINARY_SIZE (258/8) // ((258+7)/8) #define BINARY_ALIGN 4 #define SALT_SIZE 8 #define SALT_ALIGN 4 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 ) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { 
{"$S$CwkjgAKeSx2imSiN3SyBEg8e0sgE2QOx4a/VIfCHN0BZUNAWCr1X", "virtualabc"}, {"$S$CFURCPa.k6FAEbJPgejaW4nijv7rYgGc4dUJtChQtV4KLJTPTC/u", "password"}, {"$S$C6x2r.aW5Nkg7st6/u.IKWjTerHXscjPtu4spwhCVZlP89UKcbb/", "NEW_TEMP_PASSWORD"}, {NULL} }; /* * NOTE, due to the 0x4000 iteration count, I am not wasting time pre-loading * keys/salts. We will simply add SIMD code to the crypt_all. We could only * gain < .1% worrying about all the extra stuff from set_key, get_key, the * hashes, etc needed to split out SIMD. We just keep all input data in 'flat' * format, switch to SIMD, do the 0x4000 loops, and put output back into 'flat' * layout again. So we have no 'static' SIMD objects. */ static unsigned char *cursalt; static unsigned loopCnt; static unsigned char (*EncKey)[PLAINTEXT_LENGTH + 1]; static unsigned int *EncKeyLen; static char (*crypt_key)[DIGEST_SIZE]; static void init(struct fmt_main *self) { #if defined (_OPENMP) int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif EncKey = mem_calloc(self->params.max_keys_per_crypt, sizeof(*EncKey)); EncKeyLen = mem_calloc(self->params.max_keys_per_crypt, sizeof(*EncKeyLen)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); } static void done(void) { MEM_FREE(crypt_key); MEM_FREE(EncKeyLen); MEM_FREE(EncKey); } static int valid(char *ciphertext, struct fmt_main *self) { int i; unsigned count_log2; if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH) return 0; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; for (i = FORMAT_TAG_LEN; i < CIPHERTEXT_LENGTH; ++i) if (atoi64[ARCH_INDEX(ciphertext[i])] == 0x7F) return 0; count_log2 = atoi64[ARCH_INDEX(ciphertext[3])]; if (count_log2 < 7 || count_log2 > 31) return 0; return 1; } static void set_salt(void *salt) { loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])])); cursalt = salt; } static void set_key(char *key, int 
index) { int len; len = strlen(key); EncKeyLen[index] = len; memcpy(((char*)EncKey[index]), key, len + 1); } static char *get_key(int index) { return (char*)EncKey[index]; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (!memcmp(binary, crypt_key[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_key[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT) { #ifdef SIMD_COEF_64 unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys; uint64_t *keys64; unsigned i, j, len, Lcount = loopCnt; keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE); keys64 = (uint64_t*)keys; memset(keys, 0, 128*MAX_KEYS_PER_CRYPT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len = EncKeyLen[index+i]; for (j = 0; j < 8; ++j) keys[GETPOS(j, i)] = cursalt[j]; for (j = 0; j < len; ++j) keys[GETPOS(j+8, i)] = EncKey[index+i][j]; keys[GETPOS(j+8, i)] = 0x80; keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+8) << 3; } SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT); for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { len = EncKeyLen[index+i]; for (j = 0; j < len; ++j) keys[GETPOS(j+64, i)] = EncKey[index+i][j]; keys[GETPOS(j+64, i)] = 0x80; keys64[15*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (len+64) << 3; } while (--Lcount) SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT); // Last one with FLAT_OUT SIMDSHA512body(keys, (uint64_t*)crypt_key[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT); #else SHA512_CTX ctx; unsigned char tmp[DIGEST_SIZE + PLAINTEXT_LENGTH]; int len = EncKeyLen[index]; unsigned Lcount 
= loopCnt - 1; SHA512_Init( &ctx ); SHA512_Update( &ctx, cursalt, 8 ); SHA512_Update( &ctx, EncKey[index], len ); memcpy(&tmp[DIGEST_SIZE], (char *)EncKey[index], len); SHA512_Final( tmp, &ctx); len += DIGEST_SIZE; do { SHA512_Init( &ctx ); SHA512_Update( &ctx, tmp, len); SHA512_Final( tmp, &ctx); } while (--Lcount); SHA512_Init( &ctx ); SHA512_Update( &ctx, tmp, len); SHA512_Final( (unsigned char *) crypt_key[index], &ctx); #endif } return count; } static void * get_binary(char *ciphertext) { int i; unsigned sixbits; static union { unsigned char u8[BINARY_SIZE + 1]; uint32_t u32; } out; int bidx=0; char *pos; pos = &ciphertext[FORMAT_TAG_LEN + 1 + 8]; for (i = 0; i < 10; ++i) { sixbits = atoi64[ARCH_INDEX(*pos++)]; out.u8[bidx] = sixbits; sixbits = atoi64[ARCH_INDEX(*pos++)]; out.u8[bidx++] |= (sixbits<<6); sixbits >>= 2; out.u8[bidx] = sixbits; sixbits = atoi64[ARCH_INDEX(*pos++)]; out.u8[bidx++] |= (sixbits<<4); sixbits >>= 4; out.u8[bidx] = sixbits; sixbits = atoi64[ARCH_INDEX(*pos++)]; out.u8[bidx++] |= (sixbits<<2); } sixbits = atoi64[ARCH_INDEX(*pos++)]; out.u8[bidx] = sixbits; sixbits = atoi64[ARCH_INDEX(*pos++)]; out.u8[bidx++] |= (sixbits<<6); sixbits >>= 2; out.u8[bidx] = sixbits; sixbits = atoi64[ARCH_INDEX(*pos++)]; out.u8[bidx++] |= (sixbits<<4); return out.u8; } static void * get_salt(char *ciphertext) { static union { unsigned char u8[SALT_SIZE + 1]; uint32_t u32; } salt; // store off the 'real' 8 bytes of salt memcpy(salt.u8, &ciphertext[FORMAT_TAG_LEN+1], 8); // append the 1 byte of loop count information. 
salt.u8[8] = ciphertext[FORMAT_TAG_LEN]; return salt.u8; } static int get_hash_0(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_0; } static int get_hash_1(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_1; } static int get_hash_2(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_2; } static int get_hash_3(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_3; } static int get_hash_4(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_4; } static int get_hash_5(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_5; } static int get_hash_6(int index) { return *((uint32_t *)&crypt_key[index]) & PH_MASK_6; } static int salt_hash(void *salt) { return *((uint32_t *)salt) & 0x3FF; } static unsigned int iteration_count(void *salt) { return (unsigned int) 1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]); } struct fmt_main fmt_drupal7 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, // true salt is SALT_SIZE but we add the loop count SALT_SIZE + 1, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
DRB044-adi-tile-no.c
/** * adi.c: This file is part of the PolyBench/C 3.2 test suite. * Alternating Direction Implicit solver with tiling and nested SIMD. * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net * License: /LICENSE.OSU.txt */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include "polybench/polybench.h" /* Include benchmark-specific header. */ /* Default data type is double, default size is 10x1024x1024. */ #include "polybench/adi.h" /* Array initialization. */ static void init_array(int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0]) { //int i; //int j; { int c1; int c3; int c2; int c4; if (n >= 1) { #pragma omp parallel for private(c1, c4, c2, c3) for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) { #pragma omp parallel for private(c2, c4, c3) for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) { #pragma omp parallel for private(c3, c4) for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) { #pragma omp parallel for private(c4) for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) { X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n; A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n; B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n; } } } } } } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int n,double X[500 + 0][500 + 0]) { int i; int j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) { fprintf(stderr,"%0.2lf ",X[i][j]); if ((i * 500 + j) % 20 == 0) fprintf(stderr,"\n"); } fprintf(stderr,"\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_adi(int tsteps,int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0]) { //int t; //int i1; //int i2; //#pragma scop { int c0; int c2; int c8; int c9; int c15; if (n >= 1 && tsteps >= 1) { for (c0 = 0; c0 <= tsteps + -1; c0++) { if (n >= 2) { #pragma omp parallel for private(c2, c15, c9, c8) for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) { for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) { for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) { for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1]; } } } for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) { for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) { for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1]; } } } for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) { for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) { for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9]; } } } } } #pragma omp parallel for private(c2, c15) for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) { #pragma omp parallel for for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { 
X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1]; } } if (n >= 2) { #pragma omp parallel for private(c2, c15, c9, c8) for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) { for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) { for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) { for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15]; } } } for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) { for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) { for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15]; } } } for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) { for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) { for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15]; } } } } } #pragma omp parallel for private(c2, c15) for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) { #pragma omp parallel for for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) { X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15]; } } } } } //#pragma endscop } int main(int argc,char **argv) { /* Retrieve problem size. */ int n = 500; int tsteps = 10; /* Variable declaration/allocation. 
*/ double (*X)[500 + 0][500 + 0]; X = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double ))))); ; double (*A)[500 + 0][500 + 0]; A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double ))))); ; double (*B)[500 + 0][500 + 0]; B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double ))))); ; /* Initialize array(s). */ init_array(n, *X, *A, *B); /* Start timer. */ polybench_timer_start(); ; /* Run kernel. */ kernel_adi(tsteps,n, *X, *A, *B); /* Stop and print timer. */ polybench_timer_stop(); ; polybench_timer_print(); ; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ if (argc > 42 && !strcmp(argv[0],"")) print_array(n, *X); /* Be clean. */ free(((void *)X)); ; free(((void *)A)); ; free(((void *)B)); ; return 0; }
GB_unaryop__abs_fp32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_fp32_uint16
// op(A') function: GB_tran__abs_fp32_uint16

// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// NOTE(review): the operand is a float cast from uint16_t, hence always
// non-negative, so fabsf is numerically a no-op here; kept as generated.
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise Cx [p] = (float) fabsf ((float) Ax [p]) over anz entries,
// statically scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__abs_fp32_uint16
(
    float *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose kernel: the actual loop body lives in GB_unaryop_transpose.c,
// parameterized by the GB_* macros above (phase 2 of 2).
GrB_Info GB_tran__abs_fp32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
blackscholes.c
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif //ENABLE_TBB // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #endif //Precision to use for calculations #define fptype float #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. 
"P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; OptionData *data; fptype *prices; int numOptions; int * otype; fptype * sptprice; fptype * strike; fptype * rate; fptype * volatility; fptype * otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 fptype CNDF ( fptype InputX ) { int sign; fptype OutputX; fptype xInput; fptype xNPrimeofX; fptype expValues; fptype xK2; fptype xK2_2, xK2_3; fptype xK2_4, xK2_5; fptype xLocal, xLocal_1; fptype xLocal_2, xLocal_3; // Check for negative value of InputX if (InputX < 0.0) { InputX = -InputX; sign = 1; } else sign = 0; xInput = InputX; // Compute NPrimeX term common to both four & six decimal accuracy calcs expValues = exp(-0.5f * InputX * InputX); xNPrimeofX = expValues; xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI; xK2 = 0.2316419 * xInput; xK2 = 1.0 + xK2; xK2 = 1.0 / xK2; xK2_2 = xK2 * xK2; xK2_3 = xK2_2 * xK2; xK2_4 = xK2_3 * xK2; xK2_5 = xK2_4 * xK2; xLocal_1 = xK2 * 0.319381530; xLocal_2 = xK2_2 * (-0.356563782); xLocal_3 = xK2_3 * 1.781477937; xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_4 * (-1.821255978); xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_5 * 1.330274429; xLocal_2 = xLocal_2 + xLocal_3; xLocal_1 = xLocal_2 + xLocal_1; xLocal = xLocal_1 * xNPrimeofX; xLocal = 1.0 - xLocal; OutputX = xLocal; if (sign) { OutputX = 1.0 - OutputX; } return OutputX; } ////////////////////////////////////////////////////////////////////////////////////// 
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
// Prices one European option with the closed-form Black-Scholes formula
// (no dividends). otype==0 means CALL, otherwise PUT. timet is unused.
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
                            fptype strike, fptype rate, fptype volatility,
                            fptype time, int otype, float timet )
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xStockPrice;
    fptype xStrikePrice;
    fptype xRiskFreeRate;
    fptype xVolatility;
    fptype xTime;
    fptype xSqrtTime;

    fptype logValues;
    fptype xLogTerm;
    fptype xD1;
    fptype xD2;
    fptype xPowerTerm;
    fptype xDen;
    fptype d1;
    fptype d2;
    fptype FutureValueX;
    fptype NofXd1;
    fptype NofXd2;
    fptype NegNofXd1;
    fptype NegNofXd2;

    xStockPrice = sptprice;
    xStrikePrice = strike;
    xRiskFreeRate = rate;
    xVolatility = volatility;

    xTime = time;
    xSqrtTime = sqrt(xTime);

    logValues = log( sptprice / strike );

    xLogTerm = logValues;

    // d1 = (ln(S/K) + (r + v^2/2)*T) / (v*sqrt(T));  d2 = d1 - v*sqrt(T)
    xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;

    xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;

    xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    xD2 = xD1 - xDen;

    d1 = xD1;
    d2 = xD2;

    NofXd1 = CNDF( d1 );
    NofXd2 = CNDF( d2 );

    // Discounted strike K * exp(-r*T)
    FutureValueX = strike * ( exp( -(rate)*(time) ) );
    if (otype == 0) {
        // call: S*N(d1) - K*exp(-rT)*N(d2)
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {
        // put: K*exp(-rT)*N(-d2) - S*N(-d1)
        NegNofXd1 = (1.0 - NofXd1);
        NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }

    return OptionPrice;
}

#ifdef ENABLE_TBB
// TBB body object: prices the options in [range.begin(), range.end()).
struct mainWork {
  mainWork() {}
  mainWork(mainWork &w, tbb::split) {}

  void operator()(const tbb::blocked_range<int> &range) const {
      fptype price;
      int begin = range.begin();
      int end = range.end();

      for (int i=begin; i!=end; i++) {
          /* Calling main function to calculate option value based on
           * Black & Scholes's equation.
           */
          price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                       rate[i], volatility[i], otime[i],
                                       otype[i], 0);
          prices[i] = price;

#ifdef ERR_CHK
          // NOTE(review): TBB path uses a 1e-5 tolerance while the non-TBB
          // path below uses 1e-4 — inconsistent, presumably historical.
          fptype priceDelta = data[i].DGrefval - price;
          if( fabs(priceDelta) >= 1e-5 ){
              fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                      i, price, data[i].DGrefval, priceDelta);
              numError ++;
          }
#endif
      }
  }
};
#endif // ENABLE_TBB

//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
// TBB worker: NUM_RUNS passes of parallel_for over all options.
int bs_thread(void *tid_ptr) {
    int j;
    tbb::affinity_partitioner a;

    mainWork doall;
    for (j=0; j<NUM_RUNS; j++) {
      tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
    }

    return 0;
}
#else // !ENABLE_TBB

// Worker thread: prices its slice [start, end) of the option arrays
// NUM_RUNS times. In the OpenMP build the tid-based slice is ignored and
// the omp for loop covers all options instead (single caller, omp fans out).
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif
    int i, j;
    fptype price;
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

    for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
        for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
        for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation.
             */
            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-4 ){
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                numError ++;
            }
#endif
        }
    }
    return 0;
}
#endif //ENABLE_TBB

// Driver: reads options from <inputFile>, prices them via the configured
// threading backend (pthreads / OpenMP / TBB / serial), writes prices to
// <outputFile>. usage: blackscholes <nthreads> <inputFile> <outputFile>
int main (int argc, char **argv)
{
    FILE *file;
    int i;
    int loopnum;
    fptype * buffer;
    int * buffer2;
    int rv;

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
        printf("PARSEC Benchmark Suite Version " __PARSEC_XSTRING(PARSEC_VERSION)"\n");
        fflush(NULL);
#else
        printf("PARSEC Benchmark Suite\n");
        fflush(NULL);
#endif //PARSEC_VERSION

#ifdef ENABLE_PARSEC_HOOKS
   __parsec_bench_begin(__parsec_blackscholes);
#endif

   if (argc != 4)
        {
                printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
                exit(1);
        }
    nThreads = atoi(argv[1]);
    char *inputFile = argv[2];
    char *outputFile = argv[3];

    //Read input data from file
    file = fopen(inputFile, "r");
    if(file == NULL) {
      printf("ERROR: Unable to open file `%s'.\n", inputFile);
      exit(1);
    }
    rv = fscanf(file, "%i", &numOptions);
    if(rv != 1) {
      printf("ERROR: Unable to read from file `%s'.\n", inputFile);
      fclose(file);
      exit(1);
    }
    if(nThreads > numOptions) {
      printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
      nThreads = numOptions;
    }

#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
    if(nThreads != 1) {
        printf("Error: <nthreads> must be 1 (serial version)\n");
        exit(1);
    }
#endif

    // alloc spaces for the option data
    data = (OptionData*)malloc(numOptions*sizeof(OptionData));
    prices = (fptype*)malloc(numOptions*sizeof(fptype));
    for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
    {
        rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval);
        if(rv != 9) {
          printf("ERROR: Unable to read from file `%s'.\n", inputFile);
          fclose(file);
          exit(1);
        }
    }
    rv = fclose(file);
    if(rv != 0) {
      printf("ERROR: Unable to close file `%s'.\n", inputFile);
      exit(1);
    }

#ifdef ENABLE_THREADS
    MAIN_INITENV(,8000000,nThreads);
#endif
    printf("Num of Options: %d\n", numOptions);
    printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

    // Align the SoA arrays to a cache-line boundary; buffer/buffer2 keep the
    // raw base pointers. NOTE(review): buffer and buffer2 are never freed
    // (only data/prices are) — intentional-looking leak at program exit.
    buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
    sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
    strike = sptprice + numOptions;
    rate = strike + numOptions;
    volatility = rate + numOptions;
    otime = volatility + numOptions;

    buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD);
    otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));

    for (i=0; i<numOptions; i++) {
        otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
        sptprice[i] = data[i].s;
        strike[i] = data[i].strike;
        rate[i] = data[i].r;
        volatility[i] = data[i].v;
        otime[i] = data[i].t;
    }

    // NOTE(review): %d paired with a size_t-typed expression
    // (numOptions * (sizeof(...)+sizeof(int))) — format/argument mismatch
    // on LP64; would need %zu and a cast, left as-is in this doc pass.
    printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
#ifdef WIN32
    HANDLE *threads;
    int *nums;
    threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE));
    nums = (int *) malloc (nThreads * sizeof(int));

    for(i=0; i<nThreads; i++) {
        nums[i] = i;
        threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
    }
    WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
    free(threads);
    free(nums);
#else
    int *tids;
    tids = (int *) malloc (nThreads * sizeof(int));

    for(i=0; i<nThreads; i++) {
        tids[i]=i;
        CREATE_WITH_ARG(bs_thread, &tids[i]);
    }
    WAIT_FOR_END(nThreads);
    free(tids);
#endif //WIN32
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
    {
        int tid=0;
        omp_set_num_threads(nThreads);
        bs_thread(&tid);
    }
#else //ENABLE_OPENMP
#ifdef ENABLE_TBB
    tbb::task_scheduler_init init(nThreads);
    int tid=0;
    bs_thread(&tid);
#else //ENABLE_TBB
    //serial version
    int tid=0;
    bs_thread(&tid);
#endif //ENABLE_TBB
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_end();
#endif

    //Write prices to output file
    file = fopen(outputFile, "w");
    if(file == NULL) {
      printf("ERROR: Unable to open file `%s'.\n", outputFile);
      exit(1);
    }
    rv = fprintf(file, "%i\n", numOptions);
    if(rv < 0) {
      printf("ERROR: Unable to write to file `%s'.\n", outputFile);
      fclose(file);
      exit(1);
    }
    for(i=0; i<numOptions; i++) {
      rv = fprintf(file, "%.18f\n", prices[i]);
      if(rv < 0) {
        printf("ERROR: Unable to write to file `%s'.\n", outputFile);
        fclose(file);
        exit(1);
      }
    }
    rv = fclose(file);
    if(rv != 0) {
      printf("ERROR: Unable to close file `%s'.\n", outputFile);
      exit(1);
    }

#ifdef ERR_CHK
    printf("Num Errors: %d\n", numError);
#endif
    free(data);
    free(prices);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_end();
#endif

    return 0;
}
bi_dir_ctx.h
/*
 * Copyright (c) 2018 Intel Corporation. All rights reserved.
 * This software is available to you under the BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* Bidirectional bandwidth benchmark over SHMEM contexts: each OpenMP thread
 * creates a private context and issues window_size putmem operations per
 * trial to its partner PE, each thread writing a disjoint len-byte slice of
 * the symmetric src/dest buffers. Warmup trials run first (untimed); only
 * streaming nodes report results. */
static inline void bi_bw_ctx (int len, perf_metrics_t *metric_info)
{
    double start = 0.0, end = 0.0;
    int dest = partner_node(metric_info);
    unsigned long int i, j;
    /* one-time sanity checks, shared across calls (static) */
    static int check_once = 0;

    if (!check_once) {
        /* check to see whether sender and receiver are the same process */
        if (dest == metric_info->my_node) {
            fprintf(stderr, "Warning: Sender and receiver are the same "
                    "process (%d)\n", dest);
        }
        /* hostname validation for all sender and receiver processes */
        int status = check_hostname_validation(metric_info);
        if (status != 0) return;
        check_once++;
    }

    shmem_barrier_all();

    /* Untimed warmup pass: same traffic pattern as the measured loops. */
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
    shared(metric_info, start, end) num_threads(metric_info->nthreads)
    {
        const int thread_id = omp_get_thread_num();
        shmem_ctx_t ctx;
        int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
        if (err) {
            printf("PE %d, Thr. %d: Error, context creation failed\n",
                   metric_info->my_node, thread_id);
            shmem_global_exit(1);
        }

        for (i = 0; i < metric_info->warmup; i++) {
            for(j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
                shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len, len, dest);
#else
                shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
                                 metric_info->src + thread_id * len, len, dest);
#endif
            }
            /* complete the whole window before the next trial */
            shmem_ctx_quiet(ctx);
        }
        shmem_ctx_destroy(ctx);
    }

    shmem_barrier_all();

    if (streaming_node(metric_info)) {
        /* Measured pass on the streaming side: master records start after a
         * thread barrier so all contexts exist before timing begins. */
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
    shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                shmem_global_exit(1);
            }

#pragma omp barrier
#pragma omp master
            {
                start = perf_shmemx_wtime();
            }

            for (i = 0; i < metric_info->trials; i++) {
                for(j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
                    shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
                                         metric_info->src + thread_id * len, len, dest);
#else
                    shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len, len, dest);
#endif
                }
                shmem_ctx_quiet(ctx);
            }
            shmem_ctx_destroy(ctx);
        }
    } else {
        /* Non-streaming side: drive the same traffic, untimed. */
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
    shared(metric_info, start, end) num_threads(metric_info->nthreads)
        {
            const int thread_id = omp_get_thread_num();
            shmem_ctx_t ctx;
            int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
            if (err) {
                printf("PE %d, Thr. %d: Error, context creation failed\n",
                       metric_info->my_node, thread_id);
                shmem_global_exit(1);
            }

            for (i = 0; i < metric_info->trials; i++) {
                for(j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
                    shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
                                         metric_info->src + thread_id * len, len, dest);
#else
                    shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
                                     metric_info->src + thread_id * len, len, dest);
#endif
                }
                shmem_ctx_quiet(ctx);
            }
            shmem_ctx_destroy(ctx);
        }
    }

    shmem_barrier_all();

    if (streaming_node(metric_info)) {
        /* NOTE(review): end is sampled after the global barrier above, so the
         * reported interval includes both sides' completion plus barrier
         * time — presumably intentional for a bidirectional measurement;
         * confirm against calc_and_print_results' expectations. */
        end = perf_shmemx_wtime();
        calc_and_print_results(end, start, len, metric_info);
    }

    shmem_barrier_all();
}
task_memory.c
// RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#define USE_PRIVATE_TOOL 1
#include "callback.h"
#include <omp.h>

// Exercises ompt_get_task_memory: two explicit tasks (one plain, one with a
// firstprivate copy of x) are created by the master thread; the OMPT tool
// below prints the task-memory address/size at implicit-task begin/end and
// at each task-schedule point, and FileCheck verifies the output.
int main() {
  int x;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
#pragma omp task
      { x++; }
#pragma omp task firstprivate(x)
      { x++; }
    }
  }
  return 0;
}

// Implicit-task callback: queries task memory (expected NULL/0 for implicit
// tasks per the CHECK lines below) and logs begin/end with a unique id.
static void
on_ompt_callback_implicit_task(ompt_scope_endpoint_t endpoint,
                               ompt_data_t *parallel_data,
                               ompt_data_t *task_data, unsigned int team_size,
                               unsigned int thread_num, int flag) {
  void *addr = NULL;
  size_t size = 0;
  int result = ompt_get_task_memory(&addr, &size, 0);
  switch (endpoint) {
  case ompt_scope_begin:
    task_data->value = ompt_get_unique_id();
    printf("ompt_event_implicit_task_begin: task_id=%" PRIu64
           ", memory_addr=%p, memory_size=%lu, result=%d \n",
           task_data->value, addr, size, result);
    break;
  case ompt_scope_end:
    printf("ompt_event_implicit_task_end: task_id=%" PRIu64
           ", memory_addr=%p, memory_size=%lu, result=%d \n",
           task_data->value, addr, size, result);
    break;
  case ompt_scope_beginend:
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
}

// Task-create callback: tags each explicit task with a unique id (the
// initial task is skipped).
static void
on_ompt_callback_task_create(ompt_data_t *encountering_task_data,
                             const ompt_frame_t *encountering_task_frame,
                             ompt_data_t *new_task_data, int flags,
                             int has_dependences, const void *codeptr_ra) {
  if (flags & ompt_task_initial)
    return; // not interested in the initial task
  new_task_data->value = ompt_get_unique_id();
  void *addr = NULL;
  size_t size = 0;
  printf("ompt_event_task_create: task_id=%" PRIu64 "\n", new_task_data->value);
}

// Task-schedule callback: for explicit tasks the runtime is expected to
// report a non-NULL task-memory block (see CHECK-DAG lines).
static void on_ompt_callback_task_schedule(ompt_data_t *first_task_data,
                                           ompt_task_status_t prior_task_status,
                                           ompt_data_t *second_task_data) {
  void *addr = NULL;
  size_t size = 0;
  int result = ompt_get_task_memory(&addr, &size, 0);
  printf("ompt_event_task_schedule: task_id=%" PRIu64
         ", memory_addr=%p, memory_size=%lu, result=%d\n",
         first_task_data->value, addr, size, result);
}

// Tool entry: resolve the runtime entry points and register the callbacks.
int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                    ompt_data_t *tool_data) {
  ompt_set_callback = (ompt_set_callback_t)lookup("ompt_set_callback");
  ompt_get_unique_id = (ompt_get_unique_id_t)lookup("ompt_get_unique_id");
  ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory");

  register_ompt_callback(ompt_callback_implicit_task);
  register_ompt_callback(ompt_callback_task_create);
  register_ompt_callback(ompt_callback_task_schedule);
  printf("0: NULL_POINTER=%p\n", (void *)NULL);
  return 1; // success
}

void ompt_finalize(ompt_data_t *tool_data) {}

ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,
                                                            &ompt_finalize, 0};
  return &ompt_start_tool_result;
}

// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

// CHECK: ompt_event_implicit_task_begin: task_id=[[TASK_ID:[0-9]+]]
// CHECK-SAME: memory_addr=[[NULL]], memory_size=0, result=0

// CHECK: ompt_event_task_create: task_id=[[TASK_ID_0:[0-9]+]]
// CHECK-DAG: ompt_event_task_create: task_id=[[TASK_ID_1:[0-9]+]]

// Expects non-zero address, size, and result
// CHECK-DAG: ompt_event_task_schedule: task_id=[[TASK_ID_0]],
// memory_addr=0x{{[0-f]+}}, memory_size={{[1-9][0-9]*}}, result=1
// CHECK-DAG: ompt_event_task_schedule: task_id=[[TASK_ID_1]],
// memory_addr=0x{{[0-f]+}}, memory_size={{[1-9][0-9]*}}, result=1

// CHECK: ompt_event_implicit_task_end: task_id=[[TASK_ID]]
// CHECK-SAME: memory_addr=[[NULL]], memory_size=0, result=0
hmvm_mkl.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <mkl.h> #include "hacapk.h" // ######## ######## ######## ######## // mkl blas, parallel (multi-threaded blas) // ######## ######## ######## ######## void hmvm_blas_p_calc_1 (double *zau, matrix *mat, double *zu, double *zbut) { int ip,il,it; int nlf,ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill; int zero = 0; int one = 1; double dzero = 0.0; double done = 1.0; nlf=mat->nlf; for(ip=0; ip<nlf; ip++){ ndl = mat->submat[ip].ndl; ndt = mat->submat[ip].ndt; nstrtl = mat->submat[ip].nstrtl; nstrtt = mat->submat[ip].nstrtt; if(mat->submat[ip].ltmtx==1){ kt=mat->submat[ip].kt; for(il=0;il<kt;il++)zbut[il]=0.0; dgemv_("t", &ndt, &kt, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, zbut, &one); dgemv_("n", &ndl, &kt, &done, mat->submat[ip].a2, &ndl, zbut, &one, &done, &zau[nstrtl-1], &one); } else if(mat->submat[ip].ltmtx==2){ dgemv_("t", &ndt, &ndl, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, &zau[nstrtl-1], &one); } } } void hmvm_blas_p_calc_1t (double *zau, matrix *mat, double *zu, double *zbut) { int ip,il,it; int nlf,ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill; int zero = 0; int one = 1; double dzero = 0.0; double done = 1.0; nlf=mat->nlf; for(ip=0; ip<nlf; ip++){ ndl = mat->submat[ip].ndl; ndt = mat->submat[ip].ndt; nstrtl = mat->submat[ip].nstrtl; nstrtt = mat->submat[ip].nstrtt; if(mat->submat[ip].ltmtx==1){ kt=mat->submat[ip].kt; for(il=0;il<kt;il++)zbut[il]=0.0; dgemv_("t", &ndt, &kt, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, zbut, &one); dgemv_("t", &kt, &ndl, &done, mat->submat[ip].a2t, &kt, zbut, &one, &done, &zau[nstrtl-1], &one); } else if(mat->submat[ip].ltmtx==2){ dgemv_("t", &ndt, &ndl, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, &zau[nstrtl-1], &one); } } } void hmvm_blas_p_calc_2 (double *zau, matrix2 *mat2, double *zu, double *zbut) { int ip,il,it; int nlf,ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill,head; int zero = 0; int one = 
1; double dzero = 0.0; double done = 1.0; nlf=mat2.nlf; for(ip=0; ip<nlf; ip++){ ndl = mat2.ndl[ip]; ndt = mat2.ndt[ip]; nstrtl = mat2.nstrtl[ip]; nstrtt = mat2.nstrtt[ip]; if(mat2.ltmtx[ip]==1){ kt=mat2.kt[ip]; for(il=0;il<kt;il++)zbut[il]=0.0; head = mat2.a1[ip]; dgemv_("t", &ndt, &kt, &done, &mat2.rowmat[head], &ndt, &zu[nstrtt-1], &one, &done, zbut, &one); head = mat2.a2[ip]; dgemv_("n", &ndl, &kt, &done, &mat2.rowmat[head], &ndl, zbut, &one, &done, &zau[nstrtl-1], &one); } else if(mat2.ltmtx[ip]==2){ head = mat2.a1[ip]; dgemv_("t", &ndt, &ndl, &done, &mat2.rowmat[head], &ndt, &zu[nstrtt-1], &one, &done, &zau[nstrtl-1], &one); } } } void hmvm_blas_p_calc_2t (double *zau, matrix2 *mat2, double *zu, double *zbut) { int ip,il,it; int nlf,ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill,head; int zero = 0; int one = 1; double dzero = 0.0; double done = 1.0; nlf=mat2.nlf; for(ip=0; ip<nlf; ip++){ ndl = mat2.ndl[ip]; ndt = mat2.ndt[ip]; nstrtl = mat2.nstrtl[ip]; nstrtt = mat2.nstrtt[ip]; if(mat2.ltmtx[ip]==1){ kt=mat2.kt[ip]; for(il=0;il<kt;il++)zbut[il]=0.0; head = mat2.a1[ip]; dgemv_("t", &ndt, &kt, &done, &mat2.rowmat_t[head], &ndt, &zu[nstrtt-1], &one, &done, zbut, &one); head = mat2.a2[ip]; dgemv_("t", &kt, &ndl, &done, &mat2.rowmat_t[head], &kt, zbut, &one, &done, &zau[nstrtl-1], &one); } else if(mat2.ltmtx[ip]==2){ head = mat2.a1[ip]; dgemv_("t", &ndt, &ndl, &done, &mat2.rowmat_t[head], &ndt, &zu[nstrtt-1], &one, &done, &zau[nstrtl-1], &one); } } } // mkl blas interface void hmvm_blas_p(matrix *mat, matrix2 *mat2, double *b, int dump_result) { FILE *F; int i, l, nd, tmpkt; double *v=NULL, *tmp=NULL; printf("hmvm_blas_p: begin\n"); fflush(stdout); if(mat!=NULL){nd=mat->nd;tmpkt=mat->ktmax;}else{nd=mat2->nd;tmpkt=mat2->ktmax;} v=(double*)malloc(sizeof(double)*nd); tmp=(double*)malloc(sizeof(double)*tmpkt); for(i=0;i<nd;i++){ b[i] = sin((double)(i+1)); } // blas_p_1 if(mat!=NULL){ printf("blas_p_1\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_p_calc_1(v, mat, 
b, tmp); if(dump_result){ F = fopen("blas_p_1_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } // blas_p_1t if(mat!=NULL){ printf("blas_p_1t\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_p_calc_1t(v, mat, b, tmp); if(dump_result){ F = fopen("blas_p_1t_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } // blas_p_2 if(mat2!=NULL){ printf("blas_p_2\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_p_calc_2(v, mat2, b, tmp); if(dump_result){ F = fopen("blas_p_2_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } // blas_p_2t if(mat2!=NULL){ printf("blas_p_2t\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_p_calc_2t(v, mat2, b, tmp); if(dump_result){ F = fopen("blas_p_2t_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } free(v); free(tmp); printf("hmvm_blas_p: end\n"); fflush(stdout); } // mkl blas benchmark interface void hmvm_blas_p_bench(matrix *mat, matrix2 *mat2, double *b) { const int L=10; FILE *F; int i, l, nd, tmpkt; double d1, d2, dtimes[L], dmin, dmax, davg; double *v=NULL, *tmp=NULL; printf("hmvm_blas_p_bench: begin\n"); fflush(stdout); if(mat!=NULL){nd=mat->nd;tmpkt=mat->ktmax;}else{nd=mat2->nd;tmpkt=mat2->ktmax;} v=(double*)malloc(sizeof(double)*nd); tmp=(double*)malloc(sizeof(double)*tmpkt); for(i=0;i<nd;i++){ b[i] = sin((double)(i+1)); } // blas_p_1 { printf("blas_p_1\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_p_calc_1(v, mat, b, tmp); d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_p_1 min %e max %e avg %e\n", L, dmin, dmax, davg); } // blas_p_1t { printf("blas_p_1t\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_p_calc_1t(v, mat, b, tmp); 
d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_p_1t min %e max %e avg %e\n", L, dmin, dmax, davg); } // blas_p_2 { printf("blas_p_2\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_p_calc_2(v, mat2, b, tmp); d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_p_2 min %e max %e avg %e\n", L, dmin, dmax, davg); } // blas_p_2t { printf("blas_p_2t\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_p_calc_2t(v, mat2, b, tmp); d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_p_2t min %e max %e avg %e\n", L, dmin, dmax, davg); } free(v); free(tmp); printf("hmvm_blas_p_bench: end\n"); fflush(stdout); } // ######## ######## ######## ######## // mkl blas sequential, called from threads // ######## ######## ######## ######## void hmvm_blas_s_calc_1 (double *zau, matrix *mat, double *zu) { mkl_set_num_threads(1); #pragma omp parallel { int ip,il,it; int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill; double *zaut, *zbut; int ls, le; int i; int nd = mat->nd; int nlf = mat->nlf; int zero = 0; int one = 1; double dzero = 0.0; double done = 1.0; #pragma omp for for(i=0;i<nd;i++)zau[i]=0.0; zaut = (double*)malloc(sizeof(double)*nd); for(il=0;il<nd;il++)zaut[il]=0.0; zbut = (double*)malloc(sizeof(double)*mat->ktmax); ls = nd; le = 1; #pragma omp for for(ip=0; ip<nlf; ip++){ ndl =mat->submat[ip].ndl; ndt =mat->submat[ip].ndt; nstrtl=mat->submat[ip].nstrtl; 
nstrtt=mat->submat[ip].nstrtt; if(nstrtl<ls)ls=nstrtl; if(nstrtl+ndl-1>le)le=nstrtl+ndl-1; if(mat->submat[ip].ltmtx==1){ kt=mat->submat[ip].kt; for(il=0;il<kt;il++)zbut[il]=0.0; dgemv_("t", &ndt, &kt, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, zbut, &one); dgemv_("n", &ndl, &kt, &done, mat->submat[ip].a2, &ndl, zbut, &one, &done, &zaut[nstrtl-1], &one); } else if(mat->submat[ip].ltmtx==2){ dgemv_("t", &ndt, &ndl, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, &zaut[nstrtl-1], &one); } } for(il=ls-1;il<=le-1;il++){ #pragma omp atomic zau[il] += zaut[il]; } free(zaut); free(zbut); } } void hmvm_blas_s_calc_1t (double *zau, matrix *mat, double *zu) { mkl_set_num_threads(1); #pragma omp parallel { int ip,il,it; int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill; double *zaut, *zbut; int ls, le; int i; int nd = mat->nd; int nlf = mat->nlf; int zero = 0; int one = 1; double dzero = 0.0; double done = 1.0; #pragma omp for for(i=0;i<nd;i++)zau[i]=0.0; zaut = (double*)malloc(sizeof(double)*nd); for(il=0;il<nd;il++)zaut[il]=0.0; zbut = (double*)malloc(sizeof(double)*mat->ktmax); ls = nd; le = 1; #pragma omp for for(ip=0; ip<nlf; ip++){ ndl =mat->submat[ip].ndl; ndt =mat->submat[ip].ndt; nstrtl=mat->submat[ip].nstrtl; nstrtt=mat->submat[ip].nstrtt; if(nstrtl<ls)ls=nstrtl; if(nstrtl+ndl-1>le)le=nstrtl+ndl-1; if(mat->submat[ip].ltmtx==1){ kt=mat->submat[ip].kt; for(il=0;il<kt;il++)zbut[il]=0.0; dgemv_("t", &ndt, &kt, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, zbut, &one); dgemv_("t", &kt, &ndl, &done, mat->submat[ip].a2t, &kt, zbut, &one, &done, &zaut[nstrtl-1], &one); } else if(mat->submat[ip].ltmtx==2){ dgemv_("t", &ndt, &ndl, &done, mat->submat[ip].a1, &ndt, &zu[nstrtt-1], &one, &done, &zaut[nstrtl-1], &one); } } for(il=ls-1;il<=le-1;il++){ #pragma omp atomic zau[il] += zaut[il]; } free(zaut); free(zbut); } } void hmvm_blas_s_calc_2 (double *zau, matrix2 *mat2, double *zu) { mkl_set_num_threads(1); #pragma omp parallel { int ip,il,it; 
int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill, head; double *zaut, *zbut; int ls, le; int i; int nd = mat2.nd; int nlf = mat2.nlf; int zero = 0; int one = 1; double dzero = 0.0; double done = 1.0; #pragma omp for for(i=0;i<nd;i++)zau[i]=0.0; zaut = (double*)malloc(sizeof(double)*nd); for(il=0;il<nd;il++)zaut[il]=0.0; zbut = (double*)malloc(sizeof(double)*mat2.ktmax); ls = nd; le = 1; #pragma omp for for(ip=0; ip<nlf; ip++){ ndl =mat2.ndl[ip]; ndt =mat2.ndt[ip]; nstrtl=mat2.nstrtl[ip]; nstrtt=mat2.nstrtt[ip]; if(nstrtl<ls)ls=nstrtl; if(nstrtl+ndl-1>le)le=nstrtl+ndl-1; if(mat2.ltmtx[ip]==1){ kt=mat2.kt[ip]; for(il=0;il<kt;il++)zbut[il]=0.0; head = mat2.a1[ip]; dgemv_("t", &ndt, &kt, &done, &mat2.rowmat[head], &ndt, &zu[nstrtt-1], &one, &done, zbut, &one); head = mat2.a2[ip]; dgemv_("n", &ndl, &kt, &done, &mat2.rowmat[head], &ndl, zbut, &one, &done, &zaut[nstrtl-1], &one); } else if(mat2.ltmtx[ip]==2){ head = mat2.a1[ip]; dgemv_("t", &ndt, &ndl, &done, &mat2.rowmat[head], &ndt, &zu[nstrtt-1], &one, &done, &zaut[nstrtl-1], &one); } } for(il=ls-1;il<=le-1;il++){ #pragma omp atomic zau[il] += zaut[il]; } free(zaut); free(zbut); } } void hmvm_blas_s_calc_2t (double *zau, matrix2 *mat2, double *zu) { mkl_set_num_threads(1); #pragma omp parallel { int ip,il,it; int ndl,ndt,nstrtl,nstrtt,kt,itl,itt,ill, head; double *zaut, *zbut; int ls, le; int i; int nd = mat2.nd; int nlf = mat2.nlf; int zero = 0; int one = 1; double dzero = 0.0; double done = 1.0; #pragma omp for for(i=0;i<nd;i++)zau[i]=0.0; zaut = (double*)malloc(sizeof(double)*nd); for(il=0;il<nd;il++)zaut[il]=0.0; zbut = (double*)malloc(sizeof(double)*mat2.ktmax); ls = nd; le = 1; #pragma omp for for(ip=0; ip<nlf; ip++){ ndl =mat2.ndl[ip]; ndt =mat2.ndt[ip]; nstrtl=mat2.nstrtl[ip]; nstrtt=mat2.nstrtt[ip]; if(nstrtl<ls)ls=nstrtl; if(nstrtl+ndl-1>le)le=nstrtl+ndl-1; if(mat2.ltmtx[ip]==1){ kt=mat2.kt[ip]; for(il=0;il<kt;il++)zbut[il]=0.0; head = mat2.a1[ip]; dgemv_("t", &ndt, &kt, &done, &mat2.rowmat_t[head], &ndt, 
&zu[nstrtt-1], &one, &done, zbut, &one); head = mat2.a2[ip]; dgemv_("t", &kt, &ndl, &done, &mat2.rowmat_t[head], &kt, zbut, &one, &done, &zaut[nstrtl-1], &one); } else if(mat2.ltmtx[ip]==2){ head = mat2.a1[ip]; dgemv_("t", &ndt, &ndl, &done, &mat2.rowmat_t[head], &ndt, &zu[nstrtt-1], &one, &done, &zaut[nstrtl-1], &one); } } for(il=ls-1;il<=le-1;il++){ #pragma omp atomic zau[il] += zaut[il]; } free(zaut); free(zbut); } } // mkl blas interface void hmvm_blas_s(matrix *mat, matrix2 *mat2, double *b, int dump_result) { const int L=15; FILE *F; int i, l, nd = mat->nd; double d1, d2, dtimes[L], dmin, dmax, davg; double *v=NULL; printf("hmvm_blas_s: begin\n"); fflush(stdout); if(mat!=NULL)nd=mat->nd; else nd=mat2->nd v=(double*)malloc(sizeof(double)*nd); for(i=0;i<nd;i++){ b[i] = sin((double)(i+1)); } // blas_s_1 if(mat!=NULL){ printf("blas_s_1\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_s_calc_1(v, mat, b); if(dump_result){ F = fopen("blas_s_1_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } // blas_s_1t if(mat!=NULL){ printf("blas_s_1t\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_s_calc_1t(v, mat, b); if(dump_result){ F = fopen("blas_s_1t_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } // blas_s_2 if(mat2!=NULL){ printf("blas_s_2\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_s_calc_2(v, mat2, b); if(dump_result){ F = fopen("blas_s_2_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } // blas_s_2t if(mat2!=NULL){ printf("blas_s_2t\n"); fflush(stdout); for(i=0;i<nd;i++)v[i] = 0.0; hmvm_blas_s_calc_2t(v, mat2, b); if(dump_result){ F = fopen("blas_s_2t_d.txt", "w"); for(i=0;i<nd;i++)fprintf(F, "%.3E\n", v[i]); fclose(F); } } free(v); printf("hmvm_blas_s: end\n"); fflush(stdout); } // mkl blas benchmark interface void hmvm_blas_s_bench(matrix *mat, matrix2 *mat2, double *b) { const int L=10; int i, l, nd; double d1, d2, dtimes[L], dmin, dmax, davg; double 
*v=NULL; printf("hmvm_blas_s_bench: begin\n"); fflush(stdout); if(mat!=NULL)nd=mat->nd;else nd=mat2->nd; v=(double*)malloc(sizeof(double)*nd); for(i=0;i<nd;i++){ b[i] = sin((double)(i+1)); } // blas_s_1 if(mat!=NULL){ printf("blas_s_1\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_s_calc_1(v, mat, b); d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_s_1 min %e max %e avg %e\n", L, dmin, dmax, davg); } // blas_s_1t if(mat!=NULL){ printf("blas_s_1t\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_s_calc_1t(v, mat, b); d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_s_1t min %e max %e avg %e\n", L, dmin, dmax, davg); } // blas_s_2 if(mat2!=NULL){ printf("blas_s_2\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_s_calc_2(v, mat2, b); d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_s_2 min %e max %e avg %e\n", L, dmin, dmax, davg); } // blas_s_2t if(mat2!=NULL){ printf("blas_s_2t\n"); fflush(stdout); for(l=0;l<L;l++){ for(i=0;i<nd;i++)v[i] = 0.0; d1 = omp_get_wtime(); hmvm_blas_s_calc_2t(v, mat2, b); d2 = omp_get_wtime(); dtimes[l] = d2-d1; } dmin = 9999.99; dmax = 0.0; davg = 0.0; for(i=5;i<L;i++){ if(dmin>dtimes[i])dmin=dtimes[i]; if(dmax<dtimes[i])dmax=dtimes[i]; davg += dtimes[i]; } davg /= (L-5); printf("TIME %d hmvm_blas_s_2t min %e max %e avg %e\n", L, dmin, dmax, davg); } 
free(v); printf("hmvm_blas_s_bench: end\n"); fflush(stdout); }
c-parser.c
/* Parser for C and Objective-C. Copyright (C) 1987-2015 Free Software Foundation, Inc. Parser actions based on the old Bison parser; structure somewhat influenced by and fragments based on the C++ parser. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* TODO: Make sure all relevant comments, and all relevant code from all actions, brought over from old parser. Verify exact correspondence of syntax accepted. Add testcases covering every input symbol in every state in old and new parsers. Include full syntax for GNU C, including erroneous cases accepted with error messages, in syntax productions in comments. Make more diagnostics in the front end generally take an explicit location rather than implicitly using input_location. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" /* For rtl.h: needs enum reg_class. 
*/ #include "hash-set.h" #include "vec.h" #include "symtab.h" #include "input.h" #include "alias.h" #include "double-int.h" #include "machmode.h" #include "flags.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "stringpool.h" #include "attribs.h" #include "stor-layout.h" #include "varasm.h" #include "trans-mem.h" #include "langhooks.h" #include "input.h" #include "cpplib.h" #include "timevar.h" #include "c-family/c-pragma.h" #include "c-tree.h" #include "c-lang.h" #include "flags.h" #include "ggc.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "vec.h" #include "target.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "hashtab.h" #include "hash-set.h" #include "machmode.h" #include "hard-reg-set.h" #include "function.h" #include "ipa-ref.h" #include "cgraph.h" #include "plugin.h" #include "omp-low.h" #include "builtins.h" #include "gomp-constants.h" /* Initialization routine for this file. */ void c_parse_init (void) { /* The only initialization required is of the reserved word identifiers. */ unsigned int i; tree id; int mask = 0; /* Make sure RID_MAX hasn't grown past the 8 bits used to hold the keyword in the c_token structure. */ gcc_assert (RID_MAX <= 255); mask |= D_CXXONLY; if (!flag_isoc99) mask |= D_C99; if (flag_no_asm) { mask |= D_ASM | D_EXT; if (!flag_isoc99) mask |= D_EXT89; } if (!c_dialect_objc ()) mask |= D_OBJC | D_CXX_OBJC; ridpointers = ggc_cleared_vec_alloc<tree> ((int) RID_MAX); for (i = 0; i < num_c_common_reswords; i++) { /* If a keyword is disabled, do not enter it into the table and so create a canonical spelling that isn't a keyword. 
*/ if (c_common_reswords[i].disable & mask) { if (warn_cxx_compat && (c_common_reswords[i].disable & D_CXXWARN)) { id = get_identifier (c_common_reswords[i].word); C_SET_RID_CODE (id, RID_CXX_COMPAT_WARN); C_IS_RESERVED_WORD (id) = 1; } continue; } id = get_identifier (c_common_reswords[i].word); C_SET_RID_CODE (id, c_common_reswords[i].rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [(int) c_common_reswords[i].rid] = id; } for (i = 0; i < NUM_INT_N_ENTS; i++) { /* We always create the symbols but they aren't always supported. */ char name[50]; sprintf (name, "__int%d", int_n_data[i].bitsize); id = get_identifier (name); C_SET_RID_CODE (id, RID_FIRST_INT_N + i); C_IS_RESERVED_WORD (id) = 1; } } /* The C lexer intermediates between the lexer in cpplib and c-lex.c and the C parser. Unlike the C++ lexer, the parser structure stores the lexer information instead of using a separate structure. Identifiers are separated into ordinary identifiers, type names, keywords and some other Objective-C types of identifiers, and some look-ahead is maintained. ??? It might be a good idea to lex the whole file up front (as for C++). It would then be possible to share more of the C and C++ lexer code, if desired. */ /* More information about the type of a CPP_NAME token. */ typedef enum c_id_kind { /* An ordinary identifier. */ C_ID_ID, /* An identifier declared as a typedef name. */ C_ID_TYPENAME, /* An identifier declared as an Objective-C class name. */ C_ID_CLASSNAME, /* An address space identifier. */ C_ID_ADDRSPACE, /* Not an identifier. */ C_ID_NONE } c_id_kind; /* A single C token after string literal concatenation and conversion of preprocessing tokens to tokens. */ typedef struct GTY (()) c_token { /* The kind of token. */ ENUM_BITFIELD (cpp_ttype) type : 8; /* If this token is a CPP_NAME, this value indicates whether also declared as some kind of type. Otherwise, it is C_ID_NONE. 
*/ ENUM_BITFIELD (c_id_kind) id_kind : 8; /* If this token is a keyword, this value indicates which keyword. Otherwise, this value is RID_MAX. */ ENUM_BITFIELD (rid) keyword : 8; /* If this token is a CPP_PRAGMA, this indicates the pragma that was seen. Otherwise it is PRAGMA_NONE. */ ENUM_BITFIELD (pragma_kind) pragma_kind : 8; /* The location at which this token was found. */ location_t location; /* The value associated with this token, if any. */ tree value; } c_token; /* A parser structure recording information about the state and context of parsing. Includes lexer information with up to two tokens of look-ahead; more are not needed for C. */ typedef struct GTY(()) c_parser { /* The look-ahead tokens. */ c_token * GTY((skip)) tokens; /* Buffer for look-ahead tokens. */ c_token tokens_buf[2]; /* How many look-ahead tokens are available (0, 1 or 2, or more if parsing from pre-lexed tokens). */ unsigned int tokens_avail; /* True if a syntax error is being recovered from; false otherwise. c_parser_error sets this flag. It should clear this flag when enough tokens have been consumed to recover from the error. */ BOOL_BITFIELD error : 1; /* True if we're processing a pragma, and shouldn't automatically consume CPP_PRAGMA_EOL. */ BOOL_BITFIELD in_pragma : 1; /* True if we're parsing the outermost block of an if statement. */ BOOL_BITFIELD in_if_block : 1; /* True if we want to lex an untranslated string. */ BOOL_BITFIELD lex_untranslated_string : 1; /* Objective-C specific parser/lexer information. */ /* True if we are in a context where the Objective-C "PQ" keywords are considered keywords. */ BOOL_BITFIELD objc_pq_context : 1; /* True if we are parsing a (potential) Objective-C foreach statement. This is set to true after we parsed 'for (' and while we wait for 'in' or ';' to decide if it's a standard C for loop or an Objective-C foreach loop. 
*/ BOOL_BITFIELD objc_could_be_foreach_context : 1; /* The following flag is needed to contextualize Objective-C lexical analysis. In some cases (e.g., 'int NSObject;'), it is undesirable to bind an identifier to an Objective-C class, even if a class with that name exists. */ BOOL_BITFIELD objc_need_raw_identifier : 1; /* Nonzero if we're processing a __transaction statement. The value is 1 | TM_STMT_ATTR_*. */ unsigned int in_transaction : 4; /* True if we are in a context where the Objective-C "Property attribute" keywords are valid. */ BOOL_BITFIELD objc_property_attr_context : 1; /* Cilk Plus specific parser/lexer information. */ /* Buffer to hold all the tokens from parsing the vector attribute for the SIMD-enabled functions (formerly known as elemental functions). */ vec <c_token, va_gc> *cilk_simd_fn_tokens; } c_parser; /* The actual parser and external interface. ??? Does this need to be garbage-collected? */ static GTY (()) c_parser *the_parser; /* Read in and lex a single token, storing it in *TOKEN. */ static void c_lex_one_token (c_parser *parser, c_token *token) { timevar_push (TV_LEX); token->type = c_lex_with_flags (&token->value, &token->location, NULL, (parser->lex_untranslated_string ? 
C_LEX_STRING_NO_TRANSLATE : 0)); token->id_kind = C_ID_NONE; token->keyword = RID_MAX; token->pragma_kind = PRAGMA_NONE; switch (token->type) { case CPP_NAME: { tree decl; bool objc_force_identifier = parser->objc_need_raw_identifier; if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; if (C_IS_RESERVED_WORD (token->value)) { enum rid rid_code = C_RID_CODE (token->value); if (rid_code == RID_CXX_COMPAT_WARN) { warning_at (token->location, OPT_Wc___compat, "identifier %qE conflicts with C++ keyword", token->value); } else if (rid_code >= RID_FIRST_ADDR_SPACE && rid_code <= RID_LAST_ADDR_SPACE) { token->id_kind = C_ID_ADDRSPACE; token->keyword = rid_code; break; } else if (c_dialect_objc () && OBJC_IS_PQ_KEYWORD (rid_code)) { /* We found an Objective-C "pq" keyword (in, out, inout, bycopy, byref, oneway). They need special care because the interpretation depends on the context. */ if (parser->objc_pq_context) { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } else if (parser->objc_could_be_foreach_context && rid_code == RID_IN) { /* We are in Objective-C, inside a (potential) foreach context (which means after having parsed 'for (', but before having parsed ';'), and we found 'in'. We consider it the keyword which terminates the declaration at the beginning of a foreach-statement. Note that this means you can't use 'in' for anything else in that context; in particular, in Objective-C you can't use 'in' as the name of the running variable in a C for loop. We could potentially try to add code here to disambiguate, but it seems a reasonable limitation. */ token->type = CPP_KEYWORD; token->keyword = rid_code; break; } /* Else, "pq" keywords outside of the "pq" context are not keywords, and we fall through to the code for normal tokens. */ } else if (c_dialect_objc () && OBJC_IS_PATTR_KEYWORD (rid_code)) { /* We found an Objective-C "property attribute" keyword (getter, setter, readonly, etc). These are only valid in the property context. 
*/ if (parser->objc_property_attr_context) { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } /* Else they are not special keywords. */ } else if (c_dialect_objc () && (OBJC_IS_AT_KEYWORD (rid_code) || OBJC_IS_CXX_KEYWORD (rid_code))) { /* We found one of the Objective-C "@" keywords (defs, selector, synchronized, etc) or one of the Objective-C "cxx" keywords (class, private, protected, public, try, catch, throw) without a preceding '@' sign. Do nothing and fall through to the code for normal tokens (in C++ we would still consider the CXX ones keywords, but not in C). */ ; } else { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } decl = lookup_name (token->value); if (decl) { if (TREE_CODE (decl) == TYPE_DECL) { token->id_kind = C_ID_TYPENAME; break; } } else if (c_dialect_objc ()) { tree objc_interface_decl = objc_is_class_name (token->value); /* Objective-C class names are in the same namespace as variables and typedefs, and hence are shadowed by local declarations. */ if (objc_interface_decl && (!objc_force_identifier || global_bindings_p ())) { token->value = objc_interface_decl; token->id_kind = C_ID_CLASSNAME; break; } } token->id_kind = C_ID_ID; } break; case CPP_AT_NAME: /* This only happens in Objective-C; it must be a keyword. */ token->type = CPP_KEYWORD; switch (C_RID_CODE (token->value)) { /* Replace 'class' with '@class', 'private' with '@private', etc. This prevents confusion with the C++ keyword 'class', and makes the tokens consistent with other Objective-C 'AT' keywords. For example '@class' is reported as RID_AT_CLASS which is consistent with '@synchronized', which is reported as RID_AT_SYNCHRONIZED. 
*/ case RID_CLASS: token->keyword = RID_AT_CLASS; break; case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break; case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break; case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break; case RID_THROW: token->keyword = RID_AT_THROW; break; case RID_TRY: token->keyword = RID_AT_TRY; break; case RID_CATCH: token->keyword = RID_AT_CATCH; break; default: token->keyword = C_RID_CODE (token->value); } break; case CPP_COLON: case CPP_COMMA: case CPP_CLOSE_PAREN: case CPP_SEMICOLON: /* These tokens may affect the interpretation of any identifiers following, if doing Objective-C. */ if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value); token->value = NULL; break; default: break; } timevar_pop (TV_LEX); } /* Return a pointer to the next token from PARSER, reading it in if necessary. */ static inline c_token * c_parser_peek_token (c_parser *parser) { if (parser->tokens_avail == 0) { c_lex_one_token (parser, &parser->tokens[0]); parser->tokens_avail = 1; } return &parser->tokens[0]; } /* Return true if the next token from PARSER has the indicated TYPE. */ static inline bool c_parser_next_token_is (c_parser *parser, enum cpp_ttype type) { return c_parser_peek_token (parser)->type == type; } /* Return true if the next token from PARSER does not have the indicated TYPE. */ static inline bool c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type) { return !c_parser_next_token_is (parser, type); } /* Return true if the next token from PARSER is the indicated KEYWORD. */ static inline bool c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword) { return c_parser_peek_token (parser)->keyword == keyword; } /* Return a pointer to the next-but-one token from PARSER, reading it in if necessary. The next token is already read in. 
*/

static c_token *
c_parser_peek_2nd_token (c_parser *parser)
{
  /* Fast path: both look-ahead slots are already populated.  */
  if (parser->tokens_avail >= 2)
    return &parser->tokens[1];
  gcc_assert (parser->tokens_avail == 1);
  /* Never lex past an end-of-file or past the end of a pragma line.  */
  gcc_assert (parser->tokens[0].type != CPP_EOF);
  gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL);
  c_lex_one_token (parser, &parser->tokens[1]);
  parser->tokens_avail = 2;
  return &parser->tokens[1];
}

/* Return true if TOKEN can start a type name, false otherwise.  */
static bool
c_token_starts_typename (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* An identifier starts a typename only if it was declared as some
	 kind of type (typedef name, address space, ObjC class name).  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_ADDRSPACE:
	  return true;
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      /* Type-specifier and type-qualifier keywords start a typename.  */
      switch (token->keyword)
	{
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_SHORT:
	case RID_SIGNED:
	case RID_COMPLEX:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_ENUM:
	case RID_STRUCT:
	case RID_UNION:
	case RID_TYPEOF:
	case RID_CONST:
	case RID_ATOMIC:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	case RID_FRACT:
	case RID_ACCUM:
	case RID_SAT:
	case RID_AUTO_TYPE:
	  return true;
	default:
	  /* __intN keywords count only when that width is enabled for
	     the target.  */
	  if (token->keyword >= RID_FIRST_INT_N
	      && token->keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS
	      && int_n_enabled_p[token->keyword - RID_FIRST_INT_N])
	    return true;
	  return false;
	}
    case CPP_LESS:
      /* In Objective-C, '<' may open a protocol qualifier list.  */
      if (c_dialect_objc ())
	return true;
      return false;
    default:
      return false;
    }
}

/* Strategies for how aggressively lookahead should treat an unknown
   identifier as a (possibly misspelled/undeclared) type name.  */
enum c_lookahead_kind {
  /* Always treat unknown identifiers as typenames.  */
  cla_prefer_type,

  /* Could be parsing a nonabstract declarator.  Only treat an identifier
     as a typename if followed by another identifier or a star.  */
  cla_nonabstract_decl,

  /* Never treat identifiers as typenames.  */
  cla_prefer_id
};

/* Return true if the next token from PARSER can start a type name,
   false otherwise.
   LA specifies how to do lookahead in order to detect unknown type names.
   If unsure, pick CLA_PREFER_ID.  */

static inline bool
c_parser_next_tokens_start_typename (c_parser *parser, enum c_lookahead_kind la)
{
  c_token *token = c_parser_peek_token (parser);
  if (c_token_starts_typename (token))
    return true;

  /* Try a bit harder to detect an unknown typename.  */
  if (la != cla_prefer_id
      && token->type == CPP_NAME
      && token->id_kind == C_ID_ID

      /* Do not try too hard when we could have "object in array".  */
      && !parser->objc_could_be_foreach_context

      && (la == cla_prefer_type
	  || c_parser_peek_2nd_token (parser)->type == CPP_NAME
	  || c_parser_peek_2nd_token (parser)->type == CPP_MULT)

      /* Only unknown identifiers.  */
      && !lookup_name (token->value))
    return true;

  return false;
}

/* Return true if TOKEN is a type qualifier, false otherwise.  */
static bool
c_token_is_qualifier (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Named address spaces act as qualifiers.  */
      switch (token->id_kind)
	{
	case C_ID_ADDRSPACE:
	  return true;
	default:
	  return false;
	}
    case CPP_KEYWORD:
      switch (token->keyword)
	{
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	case RID_ATOMIC:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      return false;
    default:
      gcc_unreachable ();
    }
}

/* Return true if the next token from PARSER is a type qualifier,
   false otherwise.  */
static inline bool
c_parser_next_token_is_qualifier (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  return c_token_is_qualifier (token);
}

/* Return true if TOKEN can start declaration specifiers, false otherwise.
*/ static bool c_token_starts_declspecs (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert (c_dialect_objc ()); return true; default: gcc_unreachable (); } case CPP_KEYWORD: switch (token->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_NORETURN: case RID_AUTO: case RID_THREAD: case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: case RID_ALIGNAS: case RID_ATOMIC: case RID_AUTO_TYPE: return true; default: if (token->keyword >= RID_FIRST_INT_N && token->keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS && int_n_enabled_p[token->keyword - RID_FIRST_INT_N]) return true; return false; } case CPP_LESS: if (c_dialect_objc ()) return true; return false; default: return false; } } /* Return true if TOKEN can start declaration specifiers or a static assertion, false otherwise. */ static bool c_token_starts_declaration (c_token *token) { if (c_token_starts_declspecs (token) || token->keyword == RID_STATIC_ASSERT) return true; else return false; } /* Return true if the next token from PARSER can start declaration specifiers, false otherwise. */ static inline bool c_parser_next_token_starts_declspecs (c_parser *parser) { c_token *token = c_parser_peek_token (parser); /* In Objective-C, a classname normally starts a declspecs unless it is immediately followed by a dot. In that case, it is the Objective-C 2.0 "dot-syntax" for class objects, ie, calls the setter/getter on the class. 
c_token_starts_declspecs() can't differentiate between the two cases because it only checks the current token, so we have a special check here. */ if (c_dialect_objc () && token->type == CPP_NAME && token->id_kind == C_ID_CLASSNAME && c_parser_peek_2nd_token (parser)->type == CPP_DOT) return false; return c_token_starts_declspecs (token); } /* Return true if the next tokens from PARSER can start declaration specifiers or a static assertion, false otherwise. */ static inline bool c_parser_next_tokens_start_declaration (c_parser *parser) { c_token *token = c_parser_peek_token (parser); /* Same as above. */ if (c_dialect_objc () && token->type == CPP_NAME && token->id_kind == C_ID_CLASSNAME && c_parser_peek_2nd_token (parser)->type == CPP_DOT) return false; /* Labels do not start declarations. */ if (token->type == CPP_NAME && c_parser_peek_2nd_token (parser)->type == CPP_COLON) return false; if (c_token_starts_declaration (token)) return true; if (c_parser_next_tokens_start_typename (parser, cla_nonabstract_decl)) return true; return false; } /* Consume the next token from PARSER. */ static void c_parser_consume_token (c_parser *parser) { gcc_assert (parser->tokens_avail >= 1); gcc_assert (parser->tokens[0].type != CPP_EOF); gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL); gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA); if (parser->tokens != &parser->tokens_buf[0]) parser->tokens++; else if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; } /* Expect the current token to be a #pragma. Consume it and remember that we've begun parsing a pragma. 
*/

static void
c_parser_consume_pragma (c_parser *parser)
{
  gcc_assert (!parser->in_pragma);
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type == CPP_PRAGMA);
  /* Same slot-shifting discipline as c_parser_consume_token: either we
     are walking a pre-lexed buffer (advance the pointer) or we shift
     the second look-ahead token down into slot 0.  */
  if (parser->tokens != &parser->tokens_buf[0])
    parser->tokens++;
  else if (parser->tokens_avail == 2)
    parser->tokens[0] = parser->tokens[1];
  parser->tokens_avail--;
  parser->in_pragma = true;
}

/* Update the global input_location from TOKEN.  */
static inline void
c_parser_set_source_position_from_token (c_token *token)
{
  if (token->type != CPP_EOF)
    {
      input_location = token->location;
    }
}

/* Issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream of PARSER.
   MESSAGE (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".

   Do not issue a diagnostic if still recovering from an error.

   ??? This is taken from the C++ parser, but building up messages in
   this way is not i18n-friendly and some other approach should be
   used.  */

static void
c_parser_error (c_parser *parser, const char *gmsgid)
{
  c_token *token = c_parser_peek_token (parser);
  /* Emit at most one diagnostic per recovery episode.  */
  if (parser->error)
    return;
  parser->error = true;
  if (!gmsgid)
    return;
  /* This diagnostic makes more sense if it is tagged to the line of
     the token we just peeked at.  */
  c_parser_set_source_position_from_token (token);
  c_parse_error (gmsgid,
		 /* Because c_parse_error does not understand
		    CPP_KEYWORD, keywords are treated like
		    identifiers.  */
		 (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
		 /* ??? The C parser does not save the cpp flags of a
		    token, we need to pass 0 here and we will not get
		    the source spelling of some tokens but rather the
		    canonical spelling.  */
		 token->value, /*flags=*/0);
}

/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue the error MSGID.  If MSGID is NULL then a message has already
   been produced and no message will be produced this time.  Returns
   true if found, false otherwise.
*/

static bool
c_parser_require (c_parser *parser, enum cpp_ttype type, const char *msgid)
{
  if (c_parser_next_token_is (parser, type))
    {
      c_parser_consume_token (parser);
      return true;
    }
  else
    {
      c_parser_error (parser, msgid);
      return false;
    }
}

/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue the error MSGID.  Returns true if found, false otherwise.  */

static bool
c_parser_require_keyword (c_parser *parser, enum rid keyword,
			  const char *msgid)
{
  if (c_parser_next_token_is_keyword (parser, keyword))
    {
      c_parser_consume_token (parser);
      return true;
    }
  else
    {
      c_parser_error (parser, msgid);
      return false;
    }
}

/* Like c_parser_require, except that tokens will be skipped until the
   desired token is found.  An error message is still produced if the
   next token is not as expected.  If MSGID is NULL then a message has
   already been produced and no message will be produced this
   time.  */

static void
c_parser_skip_until_found (c_parser *parser,
			   enum cpp_ttype type, const char *msgid)
{
  unsigned nesting_depth = 0;

  if (c_parser_require (parser, type, msgid))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      c_token *token = c_parser_peek_token (parser);

      /* If we've reached the token we want, consume it and stop.
	 Only a TYPE token at nesting depth zero counts -- matching
	 tokens inside nested brackets are skipped over.  */
      if (token->type == type && !nesting_depth)
	{
	  c_parser_consume_token (parser);
	  break;
	}

      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* An unmatched closer ends the region we may skip over.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}

/* Skip tokens until the end of a parameter is found, but do not
   consume the comma, semicolon or closing delimiter.
*/

static void
c_parser_skip_to_end_of_parameter (c_parser *parser)
{
  unsigned nesting_depth = 0;

  while (true)
    {
      c_token *token = c_parser_peek_token (parser);

      /* A non-nested ',' or ';' terminates the parameter; leave it for
	 the caller to consume.  */
      if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON)
	  && !nesting_depth)
	break;
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      /* Never skip past the end of a pragma line while inside one.  */
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  parser->error = false;
}

/* Expect to be at the end of the pragma directive and consume an
   end of line marker.  If ERROR_IF_NOT_EOL is false, tokens before
   the end of line are discarded silently.  */

static void
c_parser_skip_to_pragma_eol (c_parser *parser, bool error_if_not_eol = true)
{
  gcc_assert (parser->in_pragma);
  parser->in_pragma = false;

  if (error_if_not_eol && c_parser_peek_token (parser)->type != CPP_PRAGMA_EOL)
    c_parser_error (parser, "expected end of line");

  /* Consume everything up to and including the CPP_PRAGMA_EOL marker
     (or stop at EOF without consuming it).  */
  cpp_ttype token_type;
  do
    {
      c_token *token = c_parser_peek_token (parser);
      token_type = token->type;
      if (token_type == CPP_EOF)
	break;
      c_parser_consume_token (parser);
    }
  while (token_type != CPP_PRAGMA_EOL);

  parser->error = false;
}

/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested ';'.  */

static void
c_parser_skip_to_end_of_block_or_statement (c_parser *parser)
{
  unsigned nesting_depth = 0;
  bool save_error = parser->error;

  while (true)
    {
      c_token *token;

      /* Peek at the next token.  */
      token = c_parser_peek_token (parser);

      switch (token->type)
	{
	case CPP_EOF:
	  return;

	case CPP_PRAGMA_EOL:
	  if (parser->in_pragma)
	    return;
	  break;

	case CPP_SEMICOLON:
	  /* If the next token is a ';', we have reached the
	     end of the statement.  */
	  if (!nesting_depth)
	    {
	      /* Consume the ';'.  */
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested '}', then we have
	     reached the end of the current block.  */
	  if (nesting_depth == 0 || --nesting_depth == 0)
	    {
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_OPEN_BRACE:
	  /* If it the next token is a '{', then we are entering a new
	     block.  Consume the entire block.  */
	  ++nesting_depth;
	  break;

	case CPP_PRAGMA:
	  /* If we see a pragma, consume the whole thing at once.  We
	     have some safeguards against consuming pragmas willy-nilly.
	     Normally, we'd expect to be here with parser->error set,
	     which disables these safeguards.  But it's possible to get
	     here for secondary error recovery, after parser->error has
	     been cleared.  */
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  parser->error = save_error;
	  continue;

	default:
	  break;
	}

      c_parser_consume_token (parser);
    }

 finished:
  parser->error = false;
}

/* CPP's options (initialized by c-opts.c).  */
extern cpp_options *cpp_opts;

/* Save the warning flags which are controlled by __extension__.
   Each flag is packed into a distinct bit of the returned int; the
   bit layout must stay in sync with restore_extension_diagnostics.  */

static inline int
disable_extension_diagnostics (void)
{
  int ret = (pedantic
	     | (warn_pointer_arith << 1)
	     | (warn_traditional << 2)
	     | (flag_iso << 3)
	     | (warn_long_long << 4)
	     | (warn_cxx_compat << 5)
	     | (warn_overlength_strings << 6)
	     /* warn_c90_c99_compat has three states: -1/0/1, so we must
		play tricks to properly restore it.  */
	     | ((warn_c90_c99_compat == 1) << 7)
	     | ((warn_c90_c99_compat == -1) << 8)
	     /* Similarly for warn_c99_c11_compat.  */
	     | ((warn_c99_c11_compat == 1) << 9)
	     | ((warn_c99_c11_compat == -1) << 10)
	     );
  cpp_opts->cpp_pedantic = pedantic = 0;
  warn_pointer_arith = 0;
  cpp_opts->cpp_warn_traditional = warn_traditional = 0;
  flag_iso = 0;
  cpp_opts->cpp_warn_long_long = warn_long_long = 0;
  warn_cxx_compat = 0;
  warn_overlength_strings = 0;
  warn_c90_c99_compat = 0;
  warn_c99_c11_compat = 0;
  return ret;
}

/* Restore the warning flags which are controlled by __extension__.
   FLAGS is the return value from disable_extension_diagnostics.  */

static inline void
restore_extension_diagnostics (int flags)
{
  /* Unpack each flag from the bit position assigned to it by
     disable_extension_diagnostics.  */
  cpp_opts->cpp_pedantic = pedantic = flags & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  cpp_opts->cpp_warn_traditional = warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
  cpp_opts->cpp_warn_long_long = warn_long_long = (flags >> 4) & 1;
  warn_cxx_compat = (flags >> 5) & 1;
  warn_overlength_strings = (flags >> 6) & 1;
  /* See above for why is this needed.  Two bits encode the three
     states -1/0/1.  */
  warn_c90_c99_compat = (flags >> 7) & 1 ? 1 : ((flags >> 8) & 1 ? -1 : 0);
  warn_c99_c11_compat = (flags >> 9) & 1 ? 1 : ((flags >> 10) & 1 ? -1 : 0);
}

/* Possibly kinds of declarator to parse.  */
typedef enum c_dtr_syn {
  /* A normal declarator with an identifier.  */
  C_DTR_NORMAL,
  /* An abstract declarator (maybe empty).  */
  C_DTR_ABSTRACT,
  /* A parameter declarator: may be either, but after a type name does
     not redeclare a typedef name as an identifier if it can
     alternatively be interpreted as a typedef name; see DR#009,
     applied in C90 TC1, omitted from C99 and reapplied in C99 TC2
     following DR#249.  For example, given a typedef T, "int T" and
     "int *T" are valid parameter declarations redeclaring T, while
     "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are
     abstract declarators rather than involving redundant parentheses;
     the same applies with attributes inside the parentheses before
     "T".  */
  C_DTR_PARM
} c_dtr_syn;

/* The binary operation precedence levels, where 0 is a dummy lowest level
   used for the bottom of the stack.
*/
enum c_parser_prec {
  PREC_NONE,
  PREC_LOGOR,
  PREC_LOGAND,
  PREC_BITOR,
  PREC_BITXOR,
  PREC_BITAND,
  PREC_EQ,
  PREC_REL,
  PREC_SHIFT,
  PREC_ADD,
  PREC_MULT,
  NUM_PRECS
};

/* Forward declarations: declarations and declaration specifiers.  */
static void c_parser_external_declaration (c_parser *);
static void c_parser_asm_definition (c_parser *);
static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool,
					   bool, bool, tree *, vec<c_token>);
static void c_parser_static_assert_declaration_no_semi (c_parser *);
static void c_parser_static_assert_declaration (c_parser *);
static void c_parser_declspecs (c_parser *, struct c_declspecs *, bool, bool,
				bool, bool, bool, enum c_lookahead_kind);
static struct c_typespec c_parser_enum_specifier (c_parser *);
static struct c_typespec c_parser_struct_or_union_specifier (c_parser *);
static tree c_parser_struct_declaration (c_parser *);
static struct c_typespec c_parser_typeof_specifier (c_parser *);
static tree c_parser_alignas_specifier (c_parser *);

/* Forward declarations: declarators and parameter lists.  */
static struct c_declarator *c_parser_declarator (c_parser *, bool, c_dtr_syn,
						 bool *);
static struct c_declarator *c_parser_direct_declarator (c_parser *, bool,
							c_dtr_syn, bool *);
static struct c_declarator *c_parser_direct_declarator_inner (c_parser *,
							      bool,
							      struct c_declarator *);
static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree);
static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree,
							  tree);
static struct c_parm *c_parser_parameter_declaration (c_parser *, tree);
static tree c_parser_simple_asm_expr (c_parser *);
static tree c_parser_attributes (c_parser *);
static struct c_type_name *c_parser_type_name (c_parser *);

/* Forward declarations: initializers.  */
static struct c_expr c_parser_initializer (c_parser *);
static struct c_expr c_parser_braced_init (c_parser *, tree, bool,
					   struct obstack *);
static void c_parser_initelt (c_parser *, struct obstack *);
static void c_parser_initval (c_parser *, struct c_expr *,
			      struct obstack *);

/* Forward declarations: statements.  */
static tree c_parser_compound_statement (c_parser *);
static void c_parser_compound_statement_nostart (c_parser *);
static void c_parser_label (c_parser *);
static void c_parser_statement (c_parser *);
static void c_parser_statement_after_labels (c_parser *);
static void c_parser_if_statement (c_parser *);
static void c_parser_switch_statement (c_parser *);
static void c_parser_while_statement (c_parser *, bool);
static void c_parser_do_statement (c_parser *, bool);
static void c_parser_for_statement (c_parser *, bool);
static tree c_parser_asm_statement (c_parser *);
static tree c_parser_asm_operands (c_parser *);
static tree c_parser_asm_goto_operands (c_parser *);
static tree c_parser_asm_clobbers (c_parser *);

/* Forward declarations: expressions.  */
static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *,
					      tree = NULL_TREE);
static struct c_expr c_parser_conditional_expression (c_parser *,
						      struct c_expr *, tree);
static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *,
						 tree);
static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_unary_expression (c_parser *);
static struct c_expr c_parser_sizeof_expression (c_parser *);
static struct c_expr c_parser_alignof_expression (c_parser *);
static struct c_expr c_parser_postfix_expression (c_parser *);
static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *,
								   struct c_type_name *,
								   location_t);
static struct c_expr c_parser_postfix_expression_after_primary (c_parser *,
								location_t loc,
								struct c_expr);
static tree c_parser_transaction (c_parser *, enum rid);
static struct c_expr c_parser_transaction_expression (c_parser *, enum rid);
static tree c_parser_transaction_cancel (c_parser *);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
static vec<tree, va_gc> *c_parser_expr_list (c_parser *, bool, bool,
					     vec<tree, va_gc> **, location_t *,
					     tree *, vec<location_t> *,
					     unsigned int * = NULL);

/* Forward declarations: OpenACC and OpenMP constructs.  */
static void c_parser_oacc_enter_exit_data (c_parser *, bool);
static void c_parser_oacc_update (c_parser *);
static tree c_parser_oacc_loop (location_t, c_parser *, char *);
static void c_parser_omp_construct (c_parser *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
static void c_parser_omp_flush (c_parser *);
static tree c_parser_omp_for_loop (location_t, c_parser *, enum tree_code,
				   tree, tree *);
static void c_parser_omp_taskwait (c_parser *);
static void c_parser_omp_taskyield (c_parser *);
static void c_parser_omp_cancel (c_parser *);
static void c_parser_omp_cancellation_point (c_parser *);

/* The syntactic context in which a pragma may appear.  */
enum pragma_context { pragma_external, pragma_struct, pragma_param,
		      pragma_stmt, pragma_compound };
static bool c_parser_pragma (c_parser *, enum pragma_context);
static bool c_parser_omp_target (c_parser *, enum pragma_context);
static void c_parser_omp_end_declare_target (c_parser *);
static void c_parser_omp_declare (c_parser *, enum pragma_context);

/* These Objective-C parser functions are only ever called when
   compiling Objective-C.  */
static void c_parser_objc_class_definition (c_parser *, tree);
static void c_parser_objc_class_instance_variables (c_parser *);
static void c_parser_objc_class_declaration (c_parser *);
static void c_parser_objc_alias_declaration (c_parser *);
static void c_parser_objc_protocol_definition (c_parser *, tree);
static bool c_parser_objc_method_type (c_parser *);
static void c_parser_objc_method_definition (c_parser *);
static void c_parser_objc_methodprotolist (c_parser *);
static void c_parser_objc_methodproto (c_parser *);
static tree c_parser_objc_method_decl (c_parser *, bool, tree *, tree *);
static tree c_parser_objc_type_name (c_parser *);
static tree c_parser_objc_protocol_refs (c_parser *);
static void c_parser_objc_try_catch_finally_statement (c_parser *);
static void c_parser_objc_synchronized_statement (c_parser *);
static tree c_parser_objc_selector (c_parser *);
static tree c_parser_objc_selector_arg (c_parser *);
static tree c_parser_objc_receiver (c_parser *);
static tree c_parser_objc_message_args (c_parser *);
static tree c_parser_objc_keywordexpr (c_parser *);
static void c_parser_objc_at_property_declaration (c_parser *);
static void c_parser_objc_at_synthesize_declaration (c_parser *);
static void c_parser_objc_at_dynamic_declaration (c_parser *);
static bool c_parser_objc_diagnose_bad_element_prefix (c_parser *,
						       struct c_declspecs *);

/* Cilk Plus supporting routines.  */
static void c_parser_cilk_simd (c_parser *);
static void c_parser_cilk_for (c_parser *, tree);
static bool c_parser_cilk_verify_simd (c_parser *, enum pragma_context);
static tree c_parser_array_notation (location_t, c_parser *, tree, tree);
static tree c_parser_cilk_clause_vectorlength (c_parser *, tree, bool);
static void c_parser_cilk_grainsize (c_parser *);

/* Parse a translation unit (C90 6.7, C99 6.9).
   translation-unit:
     external-declarations

   external-declarations:
     external-declaration
     external-declarations external-declaration

   GNU extensions:

   translation-unit:
     empty
*/

static void
c_parser_translation_unit (c_parser *parser)
{
  if (c_parser_next_token_is (parser, CPP_EOF))
    {
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "ISO C forbids an empty translation unit");
    }
  else
    {
      /* Mark a scratch obstack position so per-declaration allocations
	 can be released after each external declaration.  */
      void *obstack_position = obstack_alloc (&parser_obstack, 0);
      mark_valid_location_for_stdc_pragma (false);
      do
	{
	  ggc_collect ();
	  c_parser_external_declaration (parser);
	  obstack_free (&parser_obstack, obstack_position);
	}
      while (c_parser_next_token_is_not (parser, CPP_EOF));
    }
}

/* Parse an external declaration (C90 6.7, C99 6.9).

   external-declaration:
     function-definition
     declaration

   GNU extensions:

   external-declaration:
     asm-definition
     ;
     __extension__ external-declaration

   Objective-C:

   external-declaration:
     objc-class-definition
     objc-class-declaration
     objc-alias-declaration
     objc-protocol-definition
     objc-method-definition
     @end
*/

static void
c_parser_external_declaration (c_parser *parser)
{
  int ext;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_EXTENSION:
	  /* Suppress pedantic/compat warnings for the extent of the
	     __extension__-qualified declaration, then restore them.  */
	  ext = disable_extension_diagnostics ();
	  c_parser_consume_token (parser);
	  c_parser_external_declaration (parser);
	  restore_extension_diagnostics (ext);
	  break;
	case RID_ASM:
	  c_parser_asm_definition (parser);
	  break;
	case RID_AT_INTERFACE:
	case RID_AT_IMPLEMENTATION:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_definition (parser, NULL_TREE);
	  break;
	case RID_AT_CLASS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_declaration (parser);
	  break;
	case RID_AT_ALIAS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_alias_declaration (parser);
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_protocol_definition (parser, NULL_TREE);
	  break;
	case RID_AT_PROPERTY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_property_declaration (parser);
	  break;
	case RID_AT_SYNTHESIZE:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_synthesize_declaration (parser);
	  break;
	case RID_AT_DYNAMIC:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_dynamic_declaration (parser);
	  break;
	case RID_AT_END:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  objc_finish_implementation ();
	  break;
	default:
	  goto decl_or_fndef;
	}
      break;
    case CPP_SEMICOLON:
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "ISO C does not allow extra %<;%> outside of a function");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      mark_valid_location_for_stdc_pragma (true);
      c_parser_pragma (parser, pragma_external);
      mark_valid_location_for_stdc_pragma (false);
      break;
    case CPP_PLUS:
    case CPP_MINUS:
      if (c_dialect_objc ())
	{
	  c_parser_objc_method_definition (parser);
	  break;
	}
      /* Else fall through, and yield a syntax error trying to parse
	 as a declaration or function definition.  */
    default:
    decl_or_fndef:
      /* A declaration or a function definition (or, in Objective-C,
	 an @interface or @protocol with prefix attributes).  We can
	 only tell which after parsing the declaration specifiers, if
	 any, and the first declarator.  */
      c_parser_declaration_or_fndef (parser, true, true, true, false, true,
				     NULL, vNULL);
      break;
    }
}

static void c_finish_omp_declare_simd (c_parser *, tree, tree, vec<c_token>);

/* Parse a declaration or function definition (C90 6.5, 6.7.1, C99
   6.7, 6.9.1).  If FNDEF_OK is true, a function definition is
   accepted; otherwise (old-style parameter declarations) only other
   declarations are accepted.  If STATIC_ASSERT_OK is true, a static
   assertion is accepted; otherwise (old-style parameter declarations)
   it is not.  If NESTED is true, we are inside a function or parsing
   old-style parameter declarations; any functions encountered are
   nested functions and declaration specifiers are required; otherwise
   we are at top level and functions are normal functions and
   declaration specifiers may be optional.
   If EMPTY_OK is true, empty declarations are OK (subject to all
   other constraints); otherwise (old-style parameter declarations)
   they are diagnosed.  If START_ATTR_OK is true, the declaration
   specifiers may start with attributes; otherwise they may not.
   OBJC_FOREACH_OBJECT_DECLARATION can be used to get back the parsed
   declaration when parsing an Objective-C foreach statement.

   declaration:
     declaration-specifiers init-declarator-list[opt] ;
     static_assert-declaration

   function-definition:
     declaration-specifiers[opt] declarator declaration-list[opt]
       compound-statement

   declaration-list:
     declaration
     declaration-list declaration

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   init-declarator:
     declarator simple-asm-expr[opt] attributes[opt]
     declarator simple-asm-expr[opt] attributes[opt] = initializer

   GNU extensions:

   nested-function-definition:
     declaration-specifiers declarator declaration-list[opt]
       compound-statement

   Objective-C:
     attributes objc-class-definition
     attributes objc-category-definition
     attributes objc-protocol-definition

   The simple-asm-expr and attributes are GNU extensions.

   This function does not handle __extension__; that is handled in its
   callers.  ??? Following the old parser, __extension__ may start
   external declarations, declarations in functions and declarations
   at the start of "for" loops, but not old-style parameter
   declarations.

   C99 requires declaration specifiers in a function definition; the
   absence is diagnosed through the diagnosis of implicit int.  In GNU
   C we also allow but diagnose declarations without declaration
   specifiers, but only at top level (elsewhere they conflict with
   other syntax).

   In Objective-C, declarations of the looping variable in a foreach
   statement are exceptionally terminated by 'in' (for example, 'for
   (NSObject *object in array) { ... }').

   OpenMP:

   declaration:
     threadprivate-directive
*/

static void
c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
			       bool static_assert_ok, bool empty_ok,
			       bool nested, bool start_attr_ok,
			       tree *objc_foreach_object_declaration,
			       vec<c_token> omp_declare_simd_clauses)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  bool diagnosed_no_specs = false;
  location_t here = c_parser_peek_token (parser)->location;

  if (static_assert_ok
      && c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
    {
      c_parser_static_assert_declaration (parser);
      return;
    }
  specs = build_null_declspecs ();

  /* Try to detect an unknown type name when we have "A B" or "A *B".  */
  if (c_parser_peek_token (parser)->type == CPP_NAME
      && c_parser_peek_token (parser)->id_kind == C_ID_ID
      && (c_parser_peek_2nd_token (parser)->type == CPP_NAME
	  || c_parser_peek_2nd_token (parser)->type == CPP_MULT)
      && (!nested || !lookup_name (c_parser_peek_token (parser)->value)))
    {
      error_at (here, "unknown type name %qE",
		c_parser_peek_token (parser)->value);

      /* Parse declspecs normally to get a correct pointer type, but avoid
	 a further "fails to be a type name" error.  Refuse nested
	 functions since it is not how the user likely wants us to
	 recover.  Overwrite the bad name token with a 'void' keyword
	 in place.  */
      c_parser_peek_token (parser)->type = CPP_KEYWORD;
      c_parser_peek_token (parser)->keyword = RID_VOID;
      c_parser_peek_token (parser)->value = error_mark_node;
      fndef_ok = !nested;
    }

  c_parser_declspecs (parser, specs, true, true, start_attr_ok,
		      true, true, cla_nonabstract_decl);
  if (parser->error)
    {
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  if (nested && !specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  finish_declspecs (specs);
  bool auto_type_p = specs->typespec_word == cts_auto_type;
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      if (auto_type_p)
	error_at (here, "%<__auto_type%> in empty declaration");
      else if (empty_ok)
	shadow_tag (specs);
      else
	{
	  shadow_tag_warned (specs, 1);
	  pedwarn (here, 0, "empty declaration");
	}
      c_parser_consume_token (parser);
      return;
    }

  /* Provide better error recovery.  Note that a type name here is usually
     better diagnosed as a redeclaration.  */
  if (empty_ok
      && specs->typespec_kind == ctsk_tagdef
      && c_parser_next_token_starts_declspecs (parser)
      && !c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<;%>, identifier or %<(%>");
      parser->error = false;
      shadow_tag_warned (specs, 1);
      return;
    }
  else if (c_dialect_objc () && !auto_type_p)
    {
      /* Prefix attributes are an error on method decls.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	case CPP_MINUS:
	  if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	    return;
	  if (specs->attrs)
	    {
	      warning_at (c_parser_peek_token (parser)->location,
			  OPT_Wattributes,
			  "prefix attributes are ignored for methods");
	      specs->attrs = NULL_TREE;
	    }
	  if (fndef_ok)
	    c_parser_objc_method_definition (parser);
	  else
	    c_parser_objc_methodproto (parser);
	  return;
	  break;
	default:
	  break;
	}
      /* This is where we parse 'attributes @interface ...',
	 'attributes @implementation ...', 'attributes @protocol ...'
	 (where attributes could be, for example,
	 __attribute__ ((deprecated)).  */
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_AT_INTERFACE:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    c_parser_objc_class_definition (parser, specs->attrs);
	    return;
	  }
	  break;
	case RID_AT_IMPLEMENTATION:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    if (specs->attrs)
	      {
		warning_at (c_parser_peek_token (parser)->location,
			    OPT_Wattributes,
			    "prefix attributes are ignored for implementations");
		specs->attrs = NULL_TREE;
	      }
	    c_parser_objc_class_definition (parser, NULL_TREE);
	    return;
	  }
	  break;
	case RID_AT_PROTOCOL:
	  {
	    if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
	      return;
	    c_parser_objc_protocol_definition (parser, specs->attrs);
	    return;
	  }
	  break;
	case RID_AT_ALIAS:
	case RID_AT_CLASS:
	case RID_AT_END:
	case RID_AT_PROPERTY:
	  if (specs->attrs)
	    {
	      c_parser_error (parser, "unexpected attribute");
	      specs->attrs = NULL;
	    }
	  break;
	default:
	  break;
	}
    }

  pending_xref_error ();
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  /* One iteration per declarator in the init-declarator-list; a
     function definition ends the loop via 'break'.  */
  while (true)
    {
      struct c_declarator *declarator;
      bool dummy = false;
      timevar_id_t tv;
      tree fnbody;
      /* Declaring either one or more declarators (in which case we
	 should diagnose if there were no declaration specifiers) or a
	 function definition (in which case the diagnostic for
	 implicit int suffices).  */
      declarator = c_parser_declarator (parser,
					specs->typespec_kind != ctsk_none,
					C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  if (omp_declare_simd_clauses.exists ()
	      || !vec_safe_is_empty (parser->cilk_simd_fn_tokens))
	    c_finish_omp_declare_simd (parser, NULL_TREE, NULL_TREE,
				       omp_declare_simd_clauses);
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      if (auto_type_p && declarator->kind != cdk_id)
	{
	  error_at (here,
		    "%<__auto_type%> requires a plain identifier"
		    " as declarator");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_EQ)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is_keyword (parser, RID_ASM)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)
	  || c_parser_next_token_is_keyword (parser, RID_IN))
	{
	  tree asm_name = NULL_TREE;
	  tree postfix_attrs = NULL_TREE;
	  if (!diagnosed_no_specs && !specs->declspecs_seen_p)
	    {
	      diagnosed_no_specs = true;
	      pedwarn (here, 0, "data definition has no type or storage class");
	    }
	  /* Having seen a data definition, there cannot now be a
	     function definition.  */
	  fndef_ok = false;
	  if (c_parser_next_token_is_keyword (parser, RID_ASM))
	    asm_name = c_parser_simple_asm_expr (parser);
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    {
	      postfix_attrs = c_parser_attributes (parser);
	      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
		{
		  /* This means there is an attribute specifier after
		     the declarator in a function definition.  Provide
		     some more information for the user.  */
		  error_at (here, "attributes should be specified before the "
			    "declarator in a function definition");
		  c_parser_skip_to_end_of_block_or_statement (parser);
		  return;
		}
	    }
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      tree d;
	      struct c_expr init;
	      location_t init_loc;
	      c_parser_consume_token (parser);
	      if (auto_type_p)
		{
		  /* For __auto_type the initializer must be parsed
		     first so its type can be copied into SPECS before
		     the declaration is started.  */
		  start_init (NULL_TREE, asm_name, global_bindings_p ());
		  init_loc = c_parser_peek_token (parser)->location;
		  init = c_parser_expr_no_commas (parser, NULL);
		  if (TREE_CODE (init.value) == COMPONENT_REF
		      && DECL_C_BIT_FIELD (TREE_OPERAND (init.value, 1)))
		    error_at (here,
			      "%<__auto_type%> used with a bit-field"
			      " initializer");
		  init = convert_lvalue_to_rvalue (init_loc, init, true, true);
		  tree init_type = TREE_TYPE (init.value);
		  /* As with typeof, remove all qualifiers from atomic types.  */
		  if (init_type != error_mark_node && TYPE_ATOMIC (init_type))
		    init_type
		      = c_build_qualified_type (init_type, TYPE_UNQUALIFIED);
		  bool vm_type = variably_modified_type_p (init_type,
							   NULL_TREE);
		  if (vm_type)
		    init.value = c_save_expr (init.value);
		  finish_init ();
		  specs->typespec_kind = ctsk_typeof;
		  specs->locations[cdw_typedef] = init_loc;
		  specs->typedef_p = true;
		  specs->type = init_type;
		  if (vm_type)
		    {
		      bool maybe_const = true;
		      tree type_expr = c_fully_fold (init.value, false,
						     &maybe_const);
		      specs->expr_const_operands &= maybe_const;
		      if (specs->expr)
			specs->expr = build2 (COMPOUND_EXPR,
					      TREE_TYPE (type_expr),
					      specs->expr, type_expr);
		      else
			specs->expr = type_expr;
		    }
		  d = start_decl (declarator, specs, true,
				  chainon (postfix_attrs, all_prefix_attrs));
		  if (!d)
		    d = error_mark_node;
		  if (omp_declare_simd_clauses.exists ()
		      || !vec_safe_is_empty (parser->cilk_simd_fn_tokens))
		    c_finish_omp_declare_simd (parser, d, NULL_TREE,
					       omp_declare_simd_clauses);
		}
	      else
		{
		  /* The declaration of the variable is in effect while
		     its initializer is parsed.  */
		  d = start_decl (declarator, specs, true,
				  chainon (postfix_attrs, all_prefix_attrs));
		  if (!d)
		    d = error_mark_node;
		  if (omp_declare_simd_clauses.exists ()
		      || !vec_safe_is_empty (parser->cilk_simd_fn_tokens))
		    c_finish_omp_declare_simd (parser, d, NULL_TREE,
					       omp_declare_simd_clauses);
		  start_init (d, asm_name, global_bindings_p ());
		  init_loc = c_parser_peek_token (parser)->location;
		  init = c_parser_initializer (parser);
		  finish_init ();
		}
	      if (d != error_mark_node)
		{
		  maybe_warn_string_init (init_loc, TREE_TYPE (d), init);
		  finish_decl (d, init_loc, init.value,
			       init.original_type, asm_name);
		}
	    }
	  else
	    {
	      if (auto_type_p)
		{
		  error_at (here,
			    "%<__auto_type%> requires an initialized "
			    "data declaration");
		  c_parser_skip_to_end_of_block_or_statement (parser);
		  return;
		}
	      tree d = start_decl (declarator, specs, false,
				   chainon (postfix_attrs,
					    all_prefix_attrs));
	      if (omp_declare_simd_clauses.exists ()
		  || !vec_safe_is_empty (parser->cilk_simd_fn_tokens))
		{
		  tree parms = NULL_TREE;
		  if (d && TREE_CODE (d) == FUNCTION_DECL)
		    {
		      /* Walk the declarator chain to find the function
			 declarator carrying the parameter list.  */
		      struct c_declarator *ce = declarator;
		      while (ce != NULL)
			if (ce->kind == cdk_function)
			  {
			    parms = ce->u.arg_info->parms;
			    break;
			  }
			else
			  ce = ce->declarator;
		    }
		  if (parms)
		    temp_store_parm_decls (d, parms);
		  c_finish_omp_declare_simd (parser, d, parms,
					     omp_declare_simd_clauses);
		  if (parms)
		    temp_pop_parm_decls ();
		}
	      if (d)
		finish_decl (d, UNKNOWN_LOCATION, NULL_TREE,
			     NULL_TREE, asm_name);

	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  if (d)
		    *objc_foreach_object_declaration = d;
		  else
		    *objc_foreach_object_declaration = error_mark_node;
		}
	    }
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      if (auto_type_p)
		{
		  error_at (here,
			    "%<__auto_type%> may only be used with"
			    " a single declarator");
		  c_parser_skip_to_end_of_block_or_statement (parser);
		  return;
		}
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
		all_prefix_attrs = chainon (c_parser_attributes (parser),
					    prefix_attrs);
	      else
		all_prefix_attrs = prefix_attrs;
	      continue;
	    }
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      c_parser_consume_token (parser);
	      return;
	    }
	  else if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      /* This can only happen in Objective-C: we found the
		 'in' that terminates the declaration inside an
		 Objective-C foreach statement.  Do not consume the
		 token, so that the caller can use it to determine
		 that this indeed is a foreach context.  */
	      return;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%> or %<;%>");
	      c_parser_skip_to_end_of_block_or_statement (parser);
	      return;
	    }
	}
      else if (auto_type_p)
	{
	  error_at (here,
		    "%<__auto_type%> requires an initialized data declaration");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      else if (!fndef_ok)
	{
	  c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, "
			  "%<asm%> or %<__attribute__%>");
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  return;
	}
      /* Function definition (nested or otherwise).  */
      if (nested)
	{
	  pedwarn (here, OPT_Wpedantic, "ISO C forbids nested functions");
	  c_push_function_context ();
	}
      if (!start_function (specs, declarator, all_prefix_attrs))
	{
	  /* This can appear in many cases looking nothing like a
	     function definition, so we don't give a more specific
	     error suggesting there was one.  */
	  c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> "
			  "or %<__attribute__%>");
	  if (nested)
	    c_pop_function_context ();
	  break;
	}

      if (DECL_DECLARED_INLINE_P (current_function_decl))
        tv = TV_PARSE_INLINE;
      else
        tv = TV_PARSE_FUNC;
      timevar_push (tv);

      /* Parse old-style parameter declarations.  ??? Attributes are
	 not allowed to start declaration specifiers here because of a
	 syntax conflict between a function declaration with attribute
	 suffix and a function definition with an attribute prefix on
	 first old-style parameter declaration.  Following the old
	 parser, they are not accepted on subsequent old-style
	 parameter declarations either.  However, there is no
	 ambiguity after the first declaration, nor indeed on the
	 first as long as we don't allow postfix attributes after a
	 declarator with a nonempty identifier list in a definition;
	 and postfix attributes have never been accepted here in
	 function definitions either.  */
      while (c_parser_next_token_is_not (parser, CPP_EOF)
	     && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
	c_parser_declaration_or_fndef (parser, false, false, false,
				       true, false, NULL, vNULL);
      store_parm_decls ();
      if (omp_declare_simd_clauses.exists ()
	  || !vec_safe_is_empty (parser->cilk_simd_fn_tokens))
	c_finish_omp_declare_simd (parser, current_function_decl, NULL_TREE,
				   omp_declare_simd_clauses);
      DECL_STRUCT_FUNCTION (current_function_decl)->function_start_locus
	= c_parser_peek_token (parser)->location;
      fnbody = c_parser_compound_statement (parser);
      if (flag_cilkplus && contains_array_notation_expr (fnbody))
	fnbody = expand_array_notation_exprs (fnbody);
      if (nested)
	{
	  tree decl = current_function_decl;
	  /* Mark nested functions as needing static-chain initially.
	     lower_nested_functions will recompute it but the
	     DECL_STATIC_CHAIN flag is also used before that happens,
	     by initializer_constant_valid_p.  See
	     gcc.dg/nested-fn-2.c.  */
	  DECL_STATIC_CHAIN (decl) = 1;
	  add_stmt (fnbody);
	  finish_function ();
	  c_pop_function_context ();
	  add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
	}
      else
	{
	  add_stmt (fnbody);
	  finish_function ();
	}

      timevar_pop (tv);
      break;
    }
}

/* Parse an asm-definition (asm() outside a function body).  This is a
   GNU extension.

   asm-definition:
     simple-asm-expr ;
*/

static void
c_parser_asm_definition (c_parser *parser)
{
  tree asm_str = c_parser_simple_asm_expr (parser);
  if (asm_str)
    symtab->finalize_toplevel_asm (asm_str);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}

/* Parse a static assertion (C11 6.7.10).
   static_assert-declaration:
     static_assert-declaration-no-semi ;
*/

static void
c_parser_static_assert_declaration (c_parser *parser)
{
  c_parser_static_assert_declaration_no_semi (parser);
  if (parser->error
      || !c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
}

/* Parse a static assertion (C11 6.7.10), without the trailing
   semicolon.

   static_assert-declaration-no-semi:
     _Static_assert ( constant-expression , string-literal )
*/

static void
c_parser_static_assert_declaration_no_semi (c_parser *parser)
{
  location_t assert_loc, value_loc;
  tree value;
  tree string;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT));
  assert_loc = c_parser_peek_token (parser)->location;
  if (flag_isoc99)
    pedwarn_c99 (assert_loc, OPT_Wpedantic,
		 "ISO C99 does not support %<_Static_assert%>");
  else
    pedwarn_c99 (assert_loc, OPT_Wpedantic,
		 "ISO C90 does not support %<_Static_assert%>");
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return;
  value_loc = c_parser_peek_token (parser)->location;
  value = c_parser_expr_no_commas (parser, NULL).value;
  /* The message must be read as an untranslated string literal; the
     flag must be reset on every exit path below.  */
  parser->lex_untranslated_string = true;
  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
    {
      parser->lex_untranslated_string = false;
      return;
    }
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      string = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      parser->lex_untranslated_string = false;
      break;
    default:
      c_parser_error (parser, "expected string literal");
      parser->lex_untranslated_string = false;
      return;
    }
  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");

  if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
    {
      error_at (value_loc, "expression in static assertion is not an integer");
      return;
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      value = c_fully_fold (value, false, NULL);
      /* Strip no-op conversions.  */
      STRIP_TYPE_NOPS (value);
      /* Folding to a constant is accepted but diagnosed: it was not
	 an integer constant expression as the standard requires.  */
      if (TREE_CODE (value) == INTEGER_CST)
	pedwarn (value_loc, OPT_Wpedantic, "expression in static assertion "
		 "is not an integer constant expression");
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      error_at (value_loc, "expression in static assertion is not constant");
      return;
    }
  constant_expression_warning (value);
  if (integer_zerop (value))
    error_at (assert_loc, "static assertion failed: %E", string);
}

/* Parse some declaration specifiers (possibly none) (C90 6.5, C99
   6.7), adding them to SPECS (which may already include some).
   Storage class specifiers are accepted iff SCSPEC_OK; type
   specifiers are accepted iff TYPESPEC_OK; alignment specifiers are
   accepted iff ALIGNSPEC_OK; attributes are accepted at the start
   iff START_ATTR_OK; __auto_type is accepted iff AUTO_TYPE_OK.

   declaration-specifiers:
     storage-class-specifier declaration-specifiers[opt]
     type-specifier declaration-specifiers[opt]
     type-qualifier declaration-specifiers[opt]
     function-specifier declaration-specifiers[opt]
     alignment-specifier declaration-specifiers[opt]

   Function specifiers (inline) are from C99, and are currently
   handled as storage class specifiers, as is __thread.  Alignment
   specifiers are from C11.

   C90 6.5.1, C99 6.7.1:
   storage-class-specifier:
     typedef
     extern
     static
     auto
     register
     _Thread_local

   (_Thread_local is new in C11.)

   C99 6.7.4:
   function-specifier:
     inline
     _Noreturn

   (_Noreturn is new in C11.)

   C90 6.5.2, C99 6.7.2:
   type-specifier:
     void
     char
     short
     int
     long
     float
     double
     signed
     unsigned
     _Bool
     _Complex
     [_Imaginary removed in C99 TC2]
     struct-or-union-specifier
     enum-specifier
     typedef-name
     atomic-type-specifier

   (_Bool and _Complex are new in C99.)
   (atomic-type-specifier is new in C11.)

   C90 6.5.3, C99 6.7.3:
   type-qualifier:
     const
     restrict
     volatile
     address-space-qualifier
     _Atomic

   (restrict is new in C99.)
   (_Atomic is new in C11.)
GNU extensions: declaration-specifiers: attributes declaration-specifiers[opt] type-qualifier: address-space address-space: identifier recognized by the target storage-class-specifier: __thread type-specifier: typeof-specifier __auto_type __intN _Decimal32 _Decimal64 _Decimal128 _Fract _Accum _Sat (_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf) atomic-type-specifier _Atomic ( type-name ) Objective-C: type-specifier: class-name objc-protocol-refs[opt] typedef-name objc-protocol-refs objc-protocol-refs */ static void c_parser_declspecs (c_parser *parser, struct c_declspecs *specs, bool scspec_ok, bool typespec_ok, bool start_attr_ok, bool alignspec_ok, bool auto_type_ok, enum c_lookahead_kind la) { bool attrs_ok = start_attr_ok; bool seen_type = specs->typespec_kind != ctsk_none; if (!typespec_ok) gcc_assert (la == cla_prefer_id); while (c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD) || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS))) { struct c_typespec t; tree attrs; tree align; location_t loc = c_parser_peek_token (parser)->location; /* If we cannot accept a type, exit if the next token must start one. Also, if we already have seen a tagged definition, a typename would be an error anyway and likely the user has simply forgotten a semicolon, so we exit. 
*/ if ((!typespec_ok || specs->typespec_kind == ctsk_tagdef) && c_parser_next_tokens_start_typename (parser, la) && !c_parser_next_token_is_qualifier (parser)) break; if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *name_token = c_parser_peek_token (parser); tree value = name_token->value; c_id_kind kind = name_token->id_kind; if (kind == C_ID_ADDRSPACE) { addr_space_t as = name_token->keyword - RID_FIRST_ADDR_SPACE; declspecs_add_addrspace (name_token->location, specs, as); c_parser_consume_token (parser); attrs_ok = true; continue; } gcc_assert (!c_parser_next_token_is_qualifier (parser)); /* If we cannot accept a type, and the next token must start one, exit. Do the same if we already have seen a tagged definition, since it would be an error anyway and likely the user has simply forgotten a semicolon. */ if (seen_type || !c_parser_next_tokens_start_typename (parser, la)) break; /* Now at an unknown typename (C_ID_ID), a C_ID_TYPENAME or a C_ID_CLASSNAME. */ c_parser_consume_token (parser); seen_type = true; attrs_ok = true; if (kind == C_ID_ID) { error_at (loc, "unknown type name %qE", value); t.kind = ctsk_typedef; t.spec = error_mark_node; } else if (kind == C_ID_TYPENAME && (!c_dialect_objc () || c_parser_next_token_is_not (parser, CPP_LESS))) { t.kind = ctsk_typedef; /* For a typedef name, record the meaning, not the name. In case of 'foo foo, bar;'. */ t.spec = lookup_name (value); } else { tree proto = NULL_TREE; gcc_assert (c_dialect_objc ()); t.kind = ctsk_objc; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); t.spec = objc_get_protocol_qualified_type (value, proto); } t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (name_token->location, specs, t); continue; } if (c_parser_next_token_is (parser, CPP_LESS)) { /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" - nisse@lysator.liu.se. 
*/ tree proto; gcc_assert (c_dialect_objc ()); if (!typespec_ok || seen_type) break; proto = c_parser_objc_protocol_refs (parser); t.kind = ctsk_objc; t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto); t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); continue; } gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD)); switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_NORETURN: case RID_AUTO: case RID_THREAD: if (!scspec_ok) goto out; attrs_ok = true; /* TODO: Distinguish between function specifiers (inline, noreturn) and storage class specifiers, either here or in declspecs_add_scspec. */ declspecs_add_scspec (loc, specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_AUTO_TYPE: if (!auto_type_ok) goto out; /* Fall through. */ case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; if (c_dialect_objc ()) parser->objc_need_raw_identifier = true; t.kind = ctsk_resword; t.spec = c_parser_peek_token (parser)->value; t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); c_parser_consume_token (parser); break; case RID_ENUM: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_enum_specifier (parser); invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec); declspecs_add_type (loc, specs, t); break; case RID_STRUCT: case RID_UNION: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_struct_or_union_specifier (parser); invoke_plugin_callbacks 
(PLUGIN_FINISH_TYPE, t.spec); declspecs_add_type (loc, specs, t); break; case RID_TYPEOF: /* ??? The old parser rejected typeof after other type specifiers, but is a syntax error the best way of handling this? */ if (!typespec_ok || seen_type) goto out; attrs_ok = true; seen_type = true; t = c_parser_typeof_specifier (parser); declspecs_add_type (loc, specs, t); break; case RID_ATOMIC: /* C parser handling of Objective-C constructs needs checking for correct lvalue-to-rvalue conversions, and the code in build_modify_expr handling various Objective-C cases, and that in build_unary_op handling Objective-C cases for increment / decrement, also needs updating; uses of TYPE_MAIN_VARIANT in objc_compare_types and objc_types_are_equivalent may also need updates. */ if (c_dialect_objc ()) sorry ("%<_Atomic%> in Objective-C"); /* C parser handling of OpenMP constructs needs checking for correct lvalue-to-rvalue conversions. */ if (flag_openmp) sorry ("%<_Atomic%> with OpenMP"); if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support the %<_Atomic%> qualifier"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support the %<_Atomic%> qualifier"); attrs_ok = true; tree value; value = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (typespec_ok && c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { /* _Atomic ( type-name ). 
*/ seen_type = true; c_parser_consume_token (parser); struct c_type_name *type = c_parser_type_name (parser); t.kind = ctsk_typeof; t.spec = error_mark_node; t.expr = NULL_TREE; t.expr_const_operands = true; if (type != NULL) t.spec = groktypename (type, &t.expr, &t.expr_const_operands); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (t.spec != error_mark_node) { if (TREE_CODE (t.spec) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); else if (TREE_CODE (t.spec) == FUNCTION_TYPE) error_at (loc, "%<_Atomic%>-qualified function type"); else if (TYPE_QUALS (t.spec) != TYPE_UNQUALIFIED) error_at (loc, "%<_Atomic%> applied to a qualified type"); else t.spec = c_build_qualified_type (t.spec, TYPE_QUAL_ATOMIC); } declspecs_add_type (loc, specs, t); } else declspecs_add_qual (loc, specs, value); break; case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: attrs_ok = true; declspecs_add_qual (loc, specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_ATTRIBUTE: if (!attrs_ok) goto out; attrs = c_parser_attributes (parser); declspecs_add_attrs (loc, specs, attrs); break; case RID_ALIGNAS: if (!alignspec_ok) goto out; align = c_parser_alignas_specifier (parser); declspecs_add_alignas (loc, specs, align); break; default: goto out; } } out: ; } /* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2). enum-specifier: enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt] enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt] enum attributes[opt] identifier The form with trailing comma is new in C99. The forms with attributes are GNU extensions. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. 
enumerator-list: enumerator enumerator-list , enumerator enumerator: enumeration-constant enumeration-constant = constant-expression */ static struct c_typespec c_parser_enum_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t enum_loc; location_t ident_loc = UNKNOWN_LOCATION; /* Quiet warning. */ gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM)); enum_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); enum_loc = c_parser_peek_token (parser)->location; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; enum_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse an enum definition. */ struct c_enum_contents the_enum; tree type; tree postfix_attrs; /* We chain the enumerators in reverse order, then put them in forward order at the end. */ tree values; timevar_push (TV_PARSE_ENUM); type = start_enum (enum_loc, &the_enum, ident); values = NULL_TREE; c_parser_consume_token (parser); while (true) { tree enum_id; tree enum_value; tree enum_decl; bool seen_comma; c_token *token; location_t comma_loc = UNKNOWN_LOCATION; /* Quiet warning. */ location_t decl_loc, value_loc; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } token = c_parser_peek_token (parser); enum_id = token->value; /* Set the location in case we create a decl now. 
*/ c_parser_set_source_position_from_token (token); decl_loc = value_loc = token->location; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_EQ)) { c_parser_consume_token (parser); value_loc = c_parser_peek_token (parser)->location; enum_value = c_parser_expr_no_commas (parser, NULL).value; } else enum_value = NULL_TREE; enum_decl = build_enumerator (decl_loc, value_loc, &the_enum, enum_id, enum_value); TREE_CHAIN (enum_decl) = values; values = enum_decl; seen_comma = false; if (c_parser_next_token_is (parser, CPP_COMMA)) { comma_loc = c_parser_peek_token (parser)->location; seen_comma = true; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { if (seen_comma) pedwarn_c90 (comma_loc, OPT_Wpedantic, "comma at end of enumerator list"); c_parser_consume_token (parser); break; } if (!seen_comma) { c_parser_error (parser, "expected %<,%> or %<}%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_enum (type, nreverse (values), chainon (attrs, postfix_attrs)); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; timevar_pop (TV_PARSE_ENUM); return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, ENUMERAL_TYPE, ident); /* In ISO C, enumerated types can be referred to only if already defined. */ if (pedantic && !COMPLETE_TYPE_P (ret.spec)) { gcc_assert (ident); pedwarn (enum_loc, OPT_Wpedantic, "ISO C forbids forward references to %<enum%> types"); } return ret; } /* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1). 
struct-or-union-specifier: struct-or-union attributes[opt] identifier[opt] { struct-contents } attributes[opt] struct-or-union attributes[opt] identifier struct-contents: struct-declaration-list struct-declaration-list: struct-declaration ; struct-declaration-list struct-declaration ; GNU extensions: struct-contents: empty struct-declaration struct-declaration-list struct-declaration struct-declaration-list: struct-declaration-list ; ; (Note that in the syntax here, unlike that in ISO C, the semicolons are included here rather than in struct-declaration, in order to describe the syntax with extra semicolons and missing semicolon at end.) Objective-C: struct-declaration-list: @defs ( class-name ) (Note this does not include a trailing semicolon, but can be followed by further declarations, and gets a pedwarn-if-pedantic when followed by a semicolon.) */ static struct c_typespec c_parser_struct_or_union_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t struct_loc; location_t ident_loc = UNKNOWN_LOCATION; enum tree_code code; switch (c_parser_peek_token (parser)->keyword) { case RID_STRUCT: code = RECORD_TYPE; break; case RID_UNION: code = UNION_TYPE; break; default: gcc_unreachable (); } struct_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; struct_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse a struct or union definition. Start the scope of the tag before parsing components. 
*/ struct c_struct_parse_info *struct_info; tree type = start_struct (struct_loc, code, ident, &struct_info); tree postfix_attrs; /* We chain the components in reverse order, then put them in forward order at the end. Each struct-declaration may declare multiple components (comma-separated), so we must use chainon to join them, although when parsing each struct-declaration we can use TREE_CHAIN directly. The theory behind all this is that there will be more semicolon separated fields than comma separated fields, and so we'll be minimizing the number of node traversals required by chainon. */ tree contents; timevar_push (TV_PARSE_STRUCT); contents = NULL_TREE; c_parser_consume_token (parser); /* Handle the Objective-C @defs construct, e.g. foo(sizeof(struct{ @defs(ClassName) }));. */ if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS)) { tree name; gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto end_at_defs; if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else { c_parser_error (parser, "expected class name"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto end_at_defs; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); contents = nreverse (objc_get_class_ivars (name)); } end_at_defs: /* Parse the struct-declarations and semicolons. Problems with semicolons are diagnosed here; empty structures are diagnosed elsewhere. */ while (true) { tree decls; /* Parse any stray semicolon. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "extra semicolon in struct or union specified"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the struct or union contents. 
*/ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Accept #pragmas at struct scope. */ if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_struct); continue; } /* Parse some comma-separated declarations, but not the trailing semicolon if any. */ decls = c_parser_struct_declaration (parser); contents = chainon (decls, contents); /* If no semicolon follows, either we have a parse error or are at the end of the struct or union and should pedwarn. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) pedwarn (c_parser_peek_token (parser)->location, 0, "no semicolon at end of struct or union"); else if (parser->error || !c_parser_next_token_starts_declspecs (parser)) { c_parser_error (parser, "expected %<;%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); break; } /* If we come here, we have already emitted an error for an expected `;', identifier or `(', and we also recovered already. Go on with the next field. */ } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_struct (struct_loc, type, nreverse (contents), chainon (attrs, postfix_attrs), struct_info); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; timevar_pop (TV_PARSE_STRUCT); return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, code, ident); return ret; } /* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without* the trailing semicolon. 
struct-declaration: specifier-qualifier-list struct-declarator-list static_assert-declaration-no-semi specifier-qualifier-list: type-specifier specifier-qualifier-list[opt] type-qualifier specifier-qualifier-list[opt] attributes specifier-qualifier-list[opt] struct-declarator-list: struct-declarator struct-declarator-list , attributes[opt] struct-declarator struct-declarator: declarator attributes[opt] declarator[opt] : constant-expression attributes[opt] GNU extensions: struct-declaration: __extension__ struct-declaration specifier-qualifier-list Unlike the ISO C syntax, semicolons are handled elsewhere. The use of attributes where shown is a GNU extension. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. */ static tree c_parser_struct_declaration (c_parser *parser) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; tree decls; location_t decl_loc; if (c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { int ext; tree decl; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); decl = c_parser_struct_declaration (parser); restore_extension_diagnostics (ext); return decl; } if (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT)) { c_parser_static_assert_declaration_no_semi (parser); return NULL_TREE; } specs = build_null_declspecs (); decl_loc = c_parser_peek_token (parser)->location; /* Strictly by the standard, we shouldn't allow _Alignas here, but it appears to have been intended to allow it there, so we're keeping it as it is until WG14 reaches a conclusion of N1731. 
<http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1731.pdf> */ c_parser_declspecs (parser, specs, false, true, true, true, false, cla_nonabstract_decl); if (parser->error) return NULL_TREE; if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL_TREE; } finish_declspecs (specs); if (c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { tree ret; if (specs->typespec_kind == ctsk_none) { pedwarn (decl_loc, OPT_Wpedantic, "ISO C forbids member declarations with no members"); shadow_tag_warned (specs, pedantic); ret = NULL_TREE; } else { /* Support for unnamed structs or unions as members of structs or unions (which is [a] useful and [b] supports MS P-SDK). */ tree attrs = NULL; ret = grokfield (c_parser_peek_token (parser)->location, build_id_declarator (NULL_TREE), specs, NULL_TREE, &attrs); if (ret) decl_attributes (&ret, attrs, 0); } return ret; } /* Provide better error recovery. Note that a type name here is valid, and will be treated as a field name. */ if (specs->typespec_kind == ctsk_tagdef && TREE_CODE (specs->type) != ENUMERAL_TYPE && c_parser_next_token_starts_declspecs (parser) && !c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected %<;%>, identifier or %<(%>"); parser->error = false; return NULL_TREE; } pending_xref_error (); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; decls = NULL_TREE; while (true) { /* Declaring one or more declarators or un-named bit-fields. 
*/ struct c_declarator *declarator; bool dummy = false; if (c_parser_next_token_is (parser, CPP_COLON)) declarator = build_id_declarator (NULL_TREE); else declarator = c_parser_declarator (parser, specs->typespec_kind != ctsk_none, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement (parser); break; } if (c_parser_next_token_is (parser, CPP_COLON) || c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE) || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { tree postfix_attrs = NULL_TREE; tree width = NULL_TREE; tree d; if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); width = c_parser_expr_no_commas (parser, NULL).value; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); d = grokfield (c_parser_peek_token (parser)->location, declarator, specs, width, &all_prefix_attrs); decl_attributes (&d, chainon (postfix_attrs, all_prefix_attrs), 0); DECL_CHAIN (d) = decls; decls = d; if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon (c_parser_attributes (parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else if (c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { /* Semicolon consumed in caller. */ break; } else { c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>"); break; } } else { c_parser_error (parser, "expected %<:%>, %<,%>, %<;%>, %<}%> or " "%<__attribute__%>"); break; } } return decls; } /* Parse a typeof specifier (a GNU extension). 
typeof-specifier: typeof ( expression ) typeof ( type-name ) */ static struct c_typespec c_parser_typeof_specifier (c_parser *parser) { struct c_typespec ret; ret.kind = ctsk_typeof; ret.spec = error_mark_node; ret.expr = NULL_TREE; ret.expr_const_operands = true; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_typeof++; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { c_inhibit_evaluation_warnings--; in_typeof--; return ret; } if (c_parser_next_tokens_start_typename (parser, cla_prefer_id)) { struct c_type_name *type = c_parser_type_name (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (type != NULL) { ret.spec = groktypename (type, &ret.expr, &ret.expr_const_operands); pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE)); } } else { bool was_vm; location_t here = c_parser_peek_token (parser)->location; struct c_expr expr = c_parser_expression (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (TREE_CODE (expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1))) error_at (here, "%<typeof%> applied to a bit-field"); mark_exp_read (expr.value); ret.spec = TREE_TYPE (expr.value); was_vm = variably_modified_type_p (ret.spec, NULL_TREE); /* This is returned with the type so that when the type is evaluated, this can be evaluated. */ if (was_vm) ret.expr = c_fully_fold (expr.value, false, &ret.expr_const_operands); pop_maybe_used (was_vm); /* For use in macros such as those in <stdatomic.h>, remove all qualifiers from atomic types. (const can be an issue for more macros using typeof than just the <stdatomic.h> ones.) */ if (ret.spec != error_mark_node && TYPE_ATOMIC (ret.spec)) ret.spec = c_build_qualified_type (ret.spec, TYPE_UNQUALIFIED); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return ret; } /* Parse an alignment-specifier. 
C11 6.7.5: alignment-specifier: _Alignas ( type-name ) _Alignas ( constant-expression ) */ static tree c_parser_alignas_specifier (c_parser * parser) { tree ret = error_mark_node; location_t loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNAS)); c_parser_consume_token (parser); if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Alignas%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Alignas%>"); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return ret; if (c_parser_next_tokens_start_typename (parser, cla_prefer_id)) { struct c_type_name *type = c_parser_type_name (parser); if (type != NULL) ret = c_sizeof_or_alignof_type (loc, groktypename (type, NULL, NULL), false, true, 1); } else ret = c_parser_expr_no_commas (parser, NULL).value; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return ret; } /* Parse a declarator, possibly an abstract declarator (C90 6.5.4, 6.5.5, C99 6.7.5, 6.7.6). If TYPE_SEEN_P then a typedef name may be redeclared; otherwise it may not. KIND indicates which kind of declarator is wanted. Returns a valid declarator except in the case of a syntax error in which case NULL is returned. *SEEN_ID is set to true if an identifier being declared is seen; this is used to diagnose bad forms of abstract array declarators and to determine whether an identifier list is syntactically permitted. 
declarator: pointer[opt] direct-declarator direct-declarator: identifier ( attributes[opt] declarator ) direct-declarator array-declarator direct-declarator ( parameter-type-list ) direct-declarator ( identifier-list[opt] ) pointer: * type-qualifier-list[opt] * type-qualifier-list[opt] pointer type-qualifier-list: type-qualifier attributes type-qualifier-list type-qualifier type-qualifier-list attributes array-declarator: [ type-qualifier-list[opt] assignment-expression[opt] ] [ static type-qualifier-list[opt] assignment-expression ] [ type-qualifier-list static assignment-expression ] [ type-qualifier-list[opt] * ] parameter-type-list: parameter-list parameter-list , ... parameter-list: parameter-declaration parameter-list , parameter-declaration parameter-declaration: declaration-specifiers declarator attributes[opt] declaration-specifiers abstract-declarator[opt] attributes[opt] identifier-list: identifier identifier-list , identifier abstract-declarator: pointer pointer[opt] direct-abstract-declarator direct-abstract-declarator: ( attributes[opt] abstract-declarator ) direct-abstract-declarator[opt] array-declarator direct-abstract-declarator[opt] ( parameter-type-list[opt] ) GNU extensions: direct-declarator: direct-declarator ( parameter-forward-declarations parameter-type-list[opt] ) direct-abstract-declarator: direct-abstract-declarator[opt] ( parameter-forward-declarations parameter-type-list[opt] ) parameter-forward-declarations: parameter-list ; parameter-forward-declarations parameter-list ; The uses of attributes shown above are GNU extensions. Some forms of array declarator are not included in C99 in the syntax for abstract declarators; these are disallowed elsewhere. This may be a defect (DR#289). This function also accepts an omitted abstract declarator as being an abstract declarator, although not part of the formal syntax. 
*/ static struct c_declarator * c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* Parse any initial pointer part. */ if (c_parser_next_token_is (parser, CPP_MULT)) { struct c_declspecs *quals_attrs = build_null_declspecs (); struct c_declarator *inner; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true, false, false, cla_prefer_id); inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner == NULL) return NULL; else return make_pointer_declarator (quals_attrs, inner); } /* Now we have a direct declarator, direct abstract declarator or nothing (which counts as a direct abstract declarator here). */ return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id); } /* Parse a direct declarator or direct abstract declarator; arguments as c_parser_declarator. */ static struct c_declarator * c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* The direct declarator must start with an identifier (possibly omitted) or a parenthesized declarator (possibly abstract). In an ordinary declarator, initial parentheses must start a parenthesized declarator. In an abstract declarator or parameter declarator, they could start a parenthesized declarator or a parameter list. To tell which, the open parenthesis and any following attributes must be read. If a declaration specifier follows, then it is a parameter list; if the specifier is a typedef name, there might be an ambiguity about redeclaring it, which is resolved in the direction of treating it as a typedef name. If a close parenthesis follows, it is also an empty parameter list, as the syntax does not permit empty abstract declarators. Otherwise, it is a parenthesized declarator (in which case the analysis may be repeated inside it, recursively). ??? 
There is an ambiguity in a parameter declaration "int (__attribute__((foo)) x)", where x is not a typedef name: it could be an abstract declarator for a function, or declare x with parentheses. The proper resolution of this ambiguity needs documenting. At present we follow an accident of the old parser's implementation, whereby the first parameter must have some declaration specifiers other than just attributes. Thus as a parameter declaration it is treated as a parenthesized parameter named x, and as an abstract declarator it is rejected. ??? Also following the old parser, attributes inside an empty parameter list are ignored, making it a list not yielding a prototype, rather than giving an error or making it have one parameter with implicit type int. ??? Also following the old parser, typedef names may be redeclared in declarators, but not Objective-C class names. */ if (kind != C_DTR_ABSTRACT && c_parser_next_token_is (parser, CPP_NAME) && ((type_seen_p && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)) || c_parser_peek_token (parser)->id_kind == C_ID_ID)) { struct c_declarator *inner = build_id_declarator (c_parser_peek_token (parser)->value); *seen_id = true; inner->id_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } if (kind != C_DTR_NORMAL && c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { struct c_declarator *inner = build_id_declarator (NULL_TREE); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } /* Either we are at the end of an abstract declarator, or we have parentheses. 
*/ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_declarator *inner; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); if (kind != C_DTR_NORMAL && (c_parser_next_token_starts_declspecs (parser) || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))) { struct c_arg_info *args = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, build_id_declarator (NULL_TREE)); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } } /* A parenthesized declarator. */ inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner != NULL && attrs != NULL) inner = build_attrs_declarator (attrs, inner); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (inner == NULL) return NULL; else return c_parser_direct_declarator_inner (parser, *seen_id, inner); } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } else { if (kind == C_DTR_NORMAL) { c_parser_error (parser, "expected identifier or %<(%>"); return NULL; } else return build_id_declarator (NULL_TREE); } } /* Parse part of a direct declarator or direct abstract declarator, given that some (in INNER) has already been parsed; ID_PRESENT is true if an identifier is present, false for an abstract declarator. */ static struct c_declarator * c_parser_direct_declarator_inner (c_parser *parser, bool id_present, struct c_declarator *inner) { /* Parse a sequence of array declarators and parameter lists. 
*/ if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { location_t brace_loc = c_parser_peek_token (parser)->location; struct c_declarator *declarator; struct c_declspecs *quals_attrs = build_null_declspecs (); bool static_seen; bool star_seen; struct c_expr dimen; dimen.value = NULL_TREE; dimen.original_code = ERROR_MARK; dimen.original_type = NULL_TREE; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true, false, false, cla_prefer_id); static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC); if (static_seen) c_parser_consume_token (parser); if (static_seen && !quals_attrs->declspecs_seen_p) c_parser_declspecs (parser, quals_attrs, false, false, true, false, false, cla_prefer_id); if (!quals_attrs->declspecs_seen_p) quals_attrs = NULL; /* If "static" is present, there must be an array dimension. Otherwise, there may be a dimension, "*", or no dimension. */ if (static_seen) { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL); } else { if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { dimen.value = NULL_TREE; star_seen = false; } else if (flag_cilkplus && c_parser_next_token_is (parser, CPP_COLON)) { dimen.value = error_mark_node; star_seen = false; error_at (c_parser_peek_token (parser)->location, "array notations cannot be used in declaration"); c_parser_consume_token (parser); } else if (c_parser_next_token_is (parser, CPP_MULT)) { if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE) { dimen.value = NULL_TREE; star_seen = true; c_parser_consume_token (parser); } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL); } } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL); } } if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) c_parser_consume_token (parser); else if (flag_cilkplus && c_parser_next_token_is (parser, CPP_COLON)) { error_at (c_parser_peek_token (parser)->location, "array notations cannot be used in declaration"); 
c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return NULL; } else { c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); return NULL; } if (dimen.value) dimen = convert_lvalue_to_rvalue (brace_loc, dimen, true, true); declarator = build_array_declarator (brace_loc, dimen.value, quals_attrs, static_seen, star_seen); if (declarator == NULL) return NULL; inner = set_array_declarator_inner (declarator, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_arg_info *args; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); args = c_parser_parms_declarator (parser, id_present, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } } return inner; } /* Parse a parameter list or identifier list, including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. ID_LIST_OK is true if an identifier list is acceptable; such a list must not have attributes at the start. */ static struct c_arg_info * c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs) { push_scope (); declare_parm_level (); /* If the list starts with an identifier, it is an identifier list. Otherwise, it is either a prototype list or an empty list. */ if (id_list_ok && !attrs && c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID /* Look ahead to detect typos in type names. 
*/ && c_parser_peek_2nd_token (parser)->type != CPP_NAME && c_parser_peek_2nd_token (parser)->type != CPP_MULT && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_SQUARE) { tree list = NULL_TREE, *nextp = &list; while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { *nextp = build_tree_list (NULL_TREE, c_parser_peek_token (parser)->value); nextp = & TREE_CHAIN (*nextp); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_error (parser, "expected identifier"); break; } } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = build_arg_info (); ret->types = list; c_parser_consume_token (parser); pop_scope (); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); pop_scope (); return NULL; } } else { struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs, NULL); pop_scope (); return ret; } } /* Parse a parameter list (possibly empty), including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. EXPR is NULL or an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ static struct c_arg_info * c_parser_parms_list_declarator (c_parser *parser, tree attrs, tree expr) { bool bad_parm = false; /* ??? Following the old parser, forward parameter declarations may use abstract declarators, and if no real parameter declarations follow the forward declarations then this is not diagnosed. Also note as above that attributes are ignored as the only contents of the parentheses, or as the only contents after forward declarations. 
*/ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = build_arg_info (); c_parser_consume_token (parser); return ret; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { struct c_arg_info *ret = build_arg_info (); if (flag_allow_parameterless_variadic_functions) { /* F (...) is allowed. */ ret->types = NULL_TREE; } else { /* Suppress -Wold-style-definition for this case. */ ret->types = error_mark_node; error_at (c_parser_peek_token (parser)->location, "ISO C requires a named argument before %<...%>"); } c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } /* Nonempty list of parameters, either terminated with semicolon (forward declarations; recurse) or with close parenthesis (normal function) or with ", ... )" (variadic function). */ while (true) { /* Parse a parameter. */ struct c_parm *parm = c_parser_parameter_declaration (parser, attrs); attrs = NULL_TREE; if (parm == NULL) bad_parm = true; else push_parm_decl (parm, &expr); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree new_attrs; c_parser_consume_token (parser); mark_forward_parm_decls (); new_attrs = c_parser_attributes (parser); return c_parser_parms_list_declarator (parser, new_attrs, expr); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (bad_parm) return NULL; else return get_parm_info (false, expr); } if (!c_parser_require (parser, CPP_COMMA, "expected %<;%>, %<,%> or %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (bad_parm) return NULL; else return get_parm_info (true, expr); } else { c_parser_skip_until_found (parser, 
CPP_CLOSE_PAREN, "expected %<)%>"); /* (argument list continued from the
					c_parser_skip_until_found call
					started in the previous chunk) */
	      return NULL;
	    }
	}
    }
}

/* Parse a parameter declaration.  ATTRS are the attributes at the
   start of the declaration if it is the first parameter.
   Returns the parsed parameter, or NULL after a parse error (erroneous
   tokens are skipped so the caller can continue with the next
   parameter).  */

static struct c_parm *
c_parser_parameter_declaration (c_parser *parser, tree attrs)
{
  struct c_declspecs *specs;
  struct c_declarator *declarator;
  tree prefix_attrs;
  tree postfix_attrs = NULL_TREE;
  bool dummy = false;

  /* Accept #pragmas between parameter declarations.  */
  while (c_parser_next_token_is (parser, CPP_PRAGMA))
    c_parser_pragma (parser, pragma_param);

  if (!c_parser_next_token_starts_declspecs (parser))
    {
      c_token *token = c_parser_peek_token (parser);
      /* If an error was already reported, don't pile a second
	 diagnostic on the same token.  */
      if (parser->error)
	return NULL;
      c_parser_set_source_position_from_token (token);
      if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
	{
	  /* The token looks like a type name but isn't declared as
	     one: diagnose the likely typo specifically.  */
	  error_at (token->location, "unknown type name %qE", token->value);
	  parser->error = true;
	}
      /* ??? In some Objective-C cases '...' isn't applicable so there
	 should be a different message.  */
      else
	c_parser_error (parser,
			"expected declaration specifiers or %<...%>");
      c_parser_skip_to_end_of_parameter (parser);
      return NULL;
    }

  specs = build_null_declspecs ();
  if (attrs)
    {
      /* Attributes seen before the first parameter apply to it.  */
      declspecs_add_attrs (input_location, specs, attrs);
      attrs = NULL_TREE;
    }
  c_parser_declspecs (parser, specs, true, true, true, true, false,
		      cla_nonabstract_decl);
  finish_declspecs (specs);
  pending_xref_error ();
  /* Separate the prefix attributes from the specifiers so they can be
     chained together with any postfix attributes below.  */
  prefix_attrs = specs->attrs;
  specs->attrs = NULL_TREE;
  declarator = c_parser_declarator (parser,
				    specs->typespec_kind != ctsk_none,
				    C_DTR_PARM, &dummy);
  if (declarator == NULL)
    {
      /* Resynchronize at the next comma (parameter separator).  */
      c_parser_skip_until_found (parser, CPP_COMMA, NULL);
      return NULL;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    postfix_attrs = c_parser_attributes (parser);
  return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs),
		       declarator);
}

/* Parse a string literal in an asm expression.  It should not be
   translated, and wide string literals are an error although
   permitted by the syntax.  This is a GNU extension.

   asm-string-literal:
     string-literal

   ??? At present, following the old parser, the caller needs to have
   set lex_untranslated_string to 1.  It would be better to follow the
   C++ parser rather than using this kludge.  */

static tree
c_parser_asm_string_literal (c_parser *parser)
{
  tree str;
  /* Temporarily suppress -Woverlength-strings: asm strings are not
     subject to the ISO length limits.  */
  int save_flag = warn_overlength_strings;
  warn_overlength_strings = 0;
  if (c_parser_next_token_is (parser, CPP_STRING))
    {
      str = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is (parser, CPP_WSTRING))
    {
      error_at (c_parser_peek_token (parser)->location,
		"wide string literal in %<asm%>");
      /* Recover with an empty narrow string so parsing can continue.  */
      str = build_string (1, "");
      c_parser_consume_token (parser);
    }
  else
    {
      c_parser_error (parser, "expected string literal");
      str = NULL_TREE;
    }
  warn_overlength_strings = save_flag;
  return str;
}

/* Parse a simple asm expression.  This is used in restricted
   contexts, where a full expression with inputs and outputs does not
   make sense.  This is a GNU extension.

   simple-asm-expr:
     asm ( asm-string-literal )
*/

static tree
c_parser_simple_asm_expr (c_parser *parser)
{
  tree str;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  /* ??? Follow the C++ parser rather than using the
     lex_untranslated_string kludge.  */
  parser->lex_untranslated_string = true;
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      parser->lex_untranslated_string = false;
      return NULL_TREE;
    }
  str = c_parser_asm_string_literal (parser);
  /* Restore normal lexing before diagnosing a missing ')'.  */
  parser->lex_untranslated_string = false;
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return NULL_TREE;
    }
  return str;
}

/* Parse one "word" of an attribute name: an identifier, or one of the
   keywords (storage class specifiers, type specifiers and type
   qualifiers) accepted as attribute names.  Returns the name as an
   identifier, or NULL_TREE if the next token is not acceptable.  The
   token is NOT consumed.  */

static tree
c_parser_attribute_any_word (c_parser *parser)
{
  tree attr_name = NULL_TREE;

  if (c_parser_next_token_is (parser, CPP_KEYWORD))
    {
      /* ??? See comment above about what keywords are accepted here.  */
      bool ok;
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_STATIC:
	case RID_UNSIGNED:
	case RID_LONG:
	case RID_CONST:
	case RID_EXTERN:
	case RID_REGISTER:
	case RID_TYPEDEF:
	case RID_SHORT:
	case RID_INLINE:
	case RID_NORETURN:
	case RID_VOLATILE:
	case RID_SIGNED:
	case RID_AUTO:
	case RID_RESTRICT:
	case RID_COMPLEX:
	case RID_THREAD:
	case RID_INT:
	case RID_CHAR:
	case RID_FLOAT:
	case RID_DOUBLE:
	case RID_VOID:
	case RID_DFLOAT32:
	case RID_DFLOAT64:
	case RID_DFLOAT128:
	case RID_BOOL:
	case RID_FRACT:
	case RID_ACCUM:
	case RID_SAT:
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_CANCEL:
	case RID_ATOMIC:
	case RID_AUTO_TYPE:
	case RID_INT_N_0:
	case RID_INT_N_1:
	case RID_INT_N_2:
	case RID_INT_N_3:
	  ok = true;
	  break;
	default:
	  ok = false;
	  break;
	}
      if (!ok)
	return NULL_TREE;

      /* Accept __attribute__((__const)) as __attribute__((const)) etc.  */
      attr_name = ridpointers[(int) c_parser_peek_token (parser)->keyword];
    }
  else if (c_parser_next_token_is (parser, CPP_NAME))
    attr_name = c_parser_peek_token (parser)->value;
  return attr_name;
}

/* Returns true if NAME is an IDENTIFIER_NODE with identifier "vector",
   "__vector" or "__vector__" and Cilk Plus is enabled; the underscore
   variants are handled by is_attribute_p.  */

static inline bool
is_cilkplus_vector_p (tree name)
{
  if (flag_cilkplus && is_attribute_p ("vector", name))
    return true;
  return false;
}

/* Mask of the OpenMP-style clauses accepted inside a Cilk Plus SIMD
   function "vector(...)" attribute.  */
#define CILK_SIMD_FN_CLAUSE_MASK				\
	((OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_VECTORLENGTH)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_LINEAR)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_UNIFORM)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_MASK)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_NOMASK))

/* Parses the vector attribute of SIMD enabled functions in Cilk Plus.
   VEC_TOKEN is the "vector" token that is replaced with "simd" and
   pushed into the token list.
   Syntax:
     vector
     vector (<vector attributes>).  */

static void
c_parser_cilk_simd_fn_vector_attrs (c_parser *parser, c_token vec_token)
{
  gcc_assert (is_cilkplus_vector_p (vec_token.value));
  int paren_scope = 0;
  vec_safe_push (parser->cilk_simd_fn_tokens, vec_token);
  /* Consume the "vector" token.  */
  c_parser_consume_token (parser);

  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      paren_scope++;
    }
  /* Copy the attribute argument tokens verbatim, tracking nested
     parentheses so the copy stops at the matching close paren.  */
  while (paren_scope > 0)
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_OPEN_PAREN)
	paren_scope++;
      else if (token->type == CPP_CLOSE_PAREN)
	paren_scope--;
      /* Do not push the last ')' since we are not pushing the '('.  */
      if (!(token->type == CPP_CLOSE_PAREN && paren_scope == 0))
	vec_safe_push (parser->cilk_simd_fn_tokens, *token);
      c_parser_consume_token (parser);
    }

  /* Since we are converting an attribute to a pragma, we need to end the
     attribute with PRAGMA_EOL.  */
  c_token eol_token;
  memset (&eol_token, 0, sizeof (eol_token));
  eol_token.type = CPP_PRAGMA_EOL;
  vec_safe_push (parser->cilk_simd_fn_tokens, eol_token);
}

/* Add 2 CPP_EOF at the end of PARSER->ELEM_FN_TOKENS vector.  */

static void
c_finish_cilk_simd_fn_tokens (c_parser *parser)
{
  c_token last_token = parser->cilk_simd_fn_tokens->last ();

  /* c_parser_attributes is called in several places, so if these EOF
     tokens are already inserted, then don't do them again.  */
  if (last_token.type == CPP_EOF)
    return;

  /* Two CPP_EOF token are added as a safety net since the normal C
     front-end has two token look-ahead.  */
  c_token eof_token;
  eof_token.type = CPP_EOF;
  vec_safe_push (parser->cilk_simd_fn_tokens, eof_token);
  vec_safe_push (parser->cilk_simd_fn_tokens, eof_token);
}

/* Parse (possibly empty) attributes.  This is a GNU extension.
attributes: empty attributes attribute attribute: __attribute__ ( ( attribute-list ) ) attribute-list: attrib attribute_list , attrib attrib: empty any-word any-word ( identifier ) any-word ( identifier , nonempty-expr-list ) any-word ( expr-list ) where the "identifier" must not be declared as a type, and "any-word" may be any identifier (including one declared as a type), a reserved word storage class specifier, type specifier or type qualifier. ??? This still leaves out most reserved keywords (following the old parser), shouldn't we include them, and why not allow identifiers declared as types to start the arguments? */ static tree c_parser_attributes (c_parser *parser) { tree attrs = NULL_TREE; while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return attrs; } if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return attrs; } /* Parse the attribute list. 
*/ while (c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD)) { tree attr, attr_name, attr_args; vec<tree, va_gc> *expr_list; if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); continue; } attr_name = c_parser_attribute_any_word (parser); if (attr_name == NULL) break; if (is_cilkplus_vector_p (attr_name)) { c_token *v_token = c_parser_peek_token (parser); c_parser_cilk_simd_fn_vector_attrs (parser, *v_token); continue; } c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN)) { attr = build_tree_list (attr_name, NULL_TREE); attrs = chainon (attrs, attr); continue; } c_parser_consume_token (parser); /* Parse the attribute contents. If they start with an identifier which is followed by a comma or close parenthesis, then the arguments start with that identifier; otherwise they are an expression list. In objective-c the identifier may be a classname. 
*/ if (c_parser_next_token_is (parser, CPP_NAME) && (c_parser_peek_token (parser)->id_kind == C_ID_ID || (c_dialect_objc () && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)) && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA) || (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN)) && (attribute_takes_identifier_p (attr_name) || (c_dialect_objc () && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))) { tree arg1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = build_tree_list (NULL_TREE, arg1); else { tree tree_list; c_parser_consume_token (parser); expr_list = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL); tree_list = build_tree_list_vec (expr_list); attr_args = tree_cons (NULL_TREE, arg1, tree_list); release_tree_vector (expr_list); } } else { if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = NULL_TREE; else { expr_list = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL); attr_args = build_tree_list_vec (expr_list); release_tree_vector (expr_list); } } attr = build_tree_list (attr_name, attr_args); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } attrs = chainon (attrs, attr); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } parser->lex_untranslated_string = false; } if (flag_cilkplus && !vec_safe_is_empty (parser->cilk_simd_fn_tokens)) 
c_finish_cilk_simd_fn_tokens (parser); return attrs; } /* Parse a type name (C90 6.5.5, C99 6.7.6). type-name: specifier-qualifier-list abstract-declarator[opt] */ static struct c_type_name * c_parser_type_name (c_parser *parser) { struct c_declspecs *specs = build_null_declspecs (); struct c_declarator *declarator; struct c_type_name *ret; bool dummy = false; c_parser_declspecs (parser, specs, false, true, true, false, false, cla_prefer_type); if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL; } if (specs->type != error_mark_node) { pending_xref_error (); finish_declspecs (specs); } declarator = c_parser_declarator (parser, specs->typespec_kind != ctsk_none, C_DTR_ABSTRACT, &dummy); if (declarator == NULL) return NULL; ret = XOBNEW (&parser_obstack, struct c_type_name); ret->specs = specs; ret->declarator = declarator; return ret; } /* Parse an initializer (C90 6.5.7, C99 6.7.8). initializer: assignment-expression { initializer-list } { initializer-list , } initializer-list: designation[opt] initializer initializer-list , designation[opt] initializer designation: designator-list = designator-list: designator designator-list designator designator: array-designator . identifier array-designator: [ constant-expression ] GNU extensions: initializer: { } designation: array-designator identifier : array-designator: [ constant-expression ... constant-expression ] Any expression without commas is accepted in the syntax for the constant-expressions, with non-constant expressions rejected later. This function is only used for top-level initializers; for nested ones, see c_parser_initval. 
*/ static struct c_expr c_parser_initializer (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_braced_init (parser, NULL_TREE, false, NULL); else { struct c_expr ret; location_t loc = c_parser_peek_token (parser)->location; ret = c_parser_expr_no_commas (parser, NULL); if (TREE_CODE (ret.value) != STRING_CST && TREE_CODE (ret.value) != COMPOUND_LITERAL_EXPR) ret = convert_lvalue_to_rvalue (loc, ret, true, true); return ret; } } /* Parse a braced initializer list. TYPE is the type specified for a compound literal, and NULL_TREE for other initializers and for nested braced lists. NESTED_P is true for nested braced lists, false for the list of a compound literal or the list that is the top-level initializer in a declaration. */ static struct c_expr c_parser_braced_init (c_parser *parser, tree type, bool nested_p, struct obstack *outer_obstack) { struct c_expr ret; struct obstack braced_init_obstack; location_t brace_loc = c_parser_peek_token (parser)->location; gcc_obstack_init (&braced_init_obstack); gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); c_parser_consume_token (parser); if (nested_p) { finish_implicit_inits (brace_loc, outer_obstack); push_init_level (brace_loc, 0, &braced_init_obstack); } else really_start_incremental_init (type); if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { pedwarn (brace_loc, OPT_Wpedantic, "ISO C forbids empty initializer braces"); } else { /* Parse a non-empty initializer list, possibly with a trailing comma. 
*/ while (true) { c_parser_initelt (parser, &braced_init_obstack); if (parser->error) break; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; } } if (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE)) { ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<}%>"); pop_init_level (brace_loc, 0, &braced_init_obstack); obstack_free (&braced_init_obstack, NULL); return ret; } c_parser_consume_token (parser); ret = pop_init_level (brace_loc, 0, &braced_init_obstack); obstack_free (&braced_init_obstack, NULL); return ret; } /* Parse a nested initializer, including designators. */ static void c_parser_initelt (c_parser *parser, struct obstack * braced_init_obstack) { /* Parse any designator or designator list. A single array designator may have the subsequent "=" omitted in GNU C, but a longer list or a structure member designator may not. */ if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON) { /* Old-style structure member designator. */ set_init_label (c_parser_peek_token (parser)->location, c_parser_peek_token (parser)->value, braced_init_obstack); /* Use the colon as the error location. */ pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_Wpedantic, "obsolete use of designated initializer with %<:%>"); c_parser_consume_token (parser); c_parser_consume_token (parser); } else { /* des_seen is 0 if there have been no designators, 1 if there has been a single array designator and 2 otherwise. */ int des_seen = 0; /* Location of a designator. */ location_t des_loc = UNKNOWN_LOCATION; /* Quiet warning. 
*/ while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE) || c_parser_next_token_is (parser, CPP_DOT)) { int des_prev = des_seen; if (!des_seen) des_loc = c_parser_peek_token (parser)->location; if (des_seen < 2) des_seen++; if (c_parser_next_token_is (parser, CPP_DOT)) { des_seen = 2; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { set_init_label (des_loc, c_parser_peek_token (parser)->value, braced_init_obstack); c_parser_consume_token (parser); } else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (input_location, init, false, braced_init_obstack); return; } } else { tree first, second; location_t ellipsis_loc = UNKNOWN_LOCATION; /* Quiet warning. */ location_t array_index_loc = UNKNOWN_LOCATION; /* ??? Following the old parser, [ objc-receiver objc-message-args ] is accepted as an initializer, being distinguished from a designator by what follows the first assignment expression inside the square brackets, but after a first array designator a subsequent square bracket is for Objective-C taken to start an expression, using the obsolete form of designated initializer without '=', rather than possibly being a second level of designation: in LALR terms, the '[' is shifted rather than reducing designator to designator-list. */ if (des_prev == 1 && c_dialect_objc ()) { des_seen = des_prev; break; } if (des_prev == 0 && c_dialect_objc ()) { /* This might be an array designator or an Objective-C message expression. If the former, continue parsing here; if the latter, parse the remainder of the initializer given the starting primary-expression. ??? It might make sense to distinguish when des_prev == 1 as well; see previous comment. 
*/ tree rec, args; struct c_expr mexpr; c_parser_consume_token (parser); if (c_parser_peek_token (parser)->type == CPP_NAME && ((c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME) || (c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))) { /* Type name receiver. */ tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); rec = objc_get_class_reference (id); goto parse_message_args; } first = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (first); if (c_parser_next_token_is (parser, CPP_ELLIPSIS) || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) goto array_desig_after_first; /* Expression receiver. So far only one part without commas has been parsed; there might be more of the expression. */ rec = first; while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_expr next; location_t comma_loc, exp_loc; comma_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; next = c_parser_expr_no_commas (parser, NULL); next = convert_lvalue_to_rvalue (exp_loc, next, true, true); rec = build_compound_expr (comma_loc, rec, next.value); } parse_message_args: /* Now parse the objc-message-args. */ args = c_parser_objc_message_args (parser); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); mexpr.value = objc_build_message_expr (rec, args); mexpr.original_code = ERROR_MARK; mexpr.original_type = NULL; /* Now parse and process the remainder of the initializer, starting with this message expression as a primary-expression. 
*/ c_parser_initval (parser, &mexpr, braced_init_obstack); return; } c_parser_consume_token (parser); array_index_loc = c_parser_peek_token (parser)->location; first = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (first); array_desig_after_first: if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { ellipsis_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); second = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (second); } else second = NULL_TREE; if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { c_parser_consume_token (parser); set_init_index (array_index_loc, first, second, braced_init_obstack); if (second) pedwarn (ellipsis_loc, OPT_Wpedantic, "ISO C forbids specifying range of elements to initialize"); } else c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } } if (des_seen >= 1) { if (c_parser_next_token_is (parser, CPP_EQ)) { pedwarn_c90 (des_loc, OPT_Wpedantic, "ISO C90 forbids specifying subobject " "to initialize"); c_parser_consume_token (parser); } else { if (des_seen == 1) pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "obsolete use of designated initializer without %<=%>"); else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected %<=%>"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (input_location, init, false, braced_init_obstack); return; } } } } c_parser_initval (parser, NULL, braced_init_obstack); } /* Parse a nested initializer; as c_parser_initializer but parses initializers within braced lists, after any designators have been applied. If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the initializer. 
*/ static void c_parser_initval (c_parser *parser, struct c_expr *after, struct obstack * braced_init_obstack) { struct c_expr init; gcc_assert (!after || c_dialect_objc ()); location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_OPEN_BRACE) && !after) init = c_parser_braced_init (parser, NULL_TREE, true, braced_init_obstack); else { init = c_parser_expr_no_commas (parser, after); if (init.value != NULL_TREE && TREE_CODE (init.value) != STRING_CST && TREE_CODE (init.value) != COMPOUND_LITERAL_EXPR) init = convert_lvalue_to_rvalue (loc, init, true, true); } process_init_element (loc, init, false, braced_init_obstack); } /* Parse a compound statement (possibly a function body) (C90 6.6.2, C99 6.8.2). compound-statement: { block-item-list[opt] } { label-declarations block-item-list } block-item-list: block-item block-item-list block-item block-item: nested-declaration statement nested-declaration: declaration GNU extensions: compound-statement: { label-declarations block-item-list } nested-declaration: __extension__ nested-declaration nested-function-definition label-declarations: label-declaration label-declarations label-declaration label-declaration: __label__ identifier-list ; Allowing the mixing of declarations and code is new in C99. The GNU syntax also permits (not shown above) labels at the end of compound statements, which yield an error. We don't allow labels on declarations; this might seem like a natural extension, but there would be a conflict between attributes on the label and prefix attributes on the declaration. ??? The syntax follows the old parser in requiring something after label declarations. Although they are erroneous if the labels declared aren't defined, is it useful for the syntax to be this way? 
OpenACC: block-item: openacc-directive openacc-directive: update-directive OpenMP: block-item: openmp-directive openmp-directive: barrier-directive flush-directive taskwait-directive taskyield-directive cancel-directive cancellation-point-directive */ static tree c_parser_compound_statement (c_parser *parser) { tree stmt; location_t brace_loc; brace_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Ensure a scope is entered and left anyway to avoid confusion if we have just prepared to enter a function body. */ stmt = c_begin_compound_stmt (true); c_end_compound_stmt (brace_loc, stmt, true); return error_mark_node; } stmt = c_begin_compound_stmt (true); c_parser_compound_statement_nostart (parser); /* If the compound stmt contains array notations, then we expand them. */ if (flag_cilkplus && contains_array_notation_expr (stmt)) stmt = expand_array_notation_exprs (stmt); return c_end_compound_stmt (brace_loc, stmt, true); } /* Parse a compound statement except for the opening brace. This is used for parsing both compound statements and statement expressions (which follow different paths to handling the opening). */ static void c_parser_compound_statement_nostart (c_parser *parser) { bool last_stmt = false; bool last_label = false; bool save_valid_for_pragma = valid_location_for_stdc_pragma_p (); location_t label_loc = UNKNOWN_LOCATION; /* Quiet warning. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); return; } mark_valid_location_for_stdc_pragma (true); if (c_parser_next_token_is_keyword (parser, RID_LABEL)) { /* Read zero or more forward-declarations for labels that nested functions can jump to. 
*/ mark_valid_location_for_stdc_pragma (false); while (c_parser_next_token_is_keyword (parser, RID_LABEL)) { label_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree label; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } label = declare_label (c_parser_peek_token (parser)->value); C_DECLARED_LABEL_FLAG (label) = 1; add_stmt (build_stmt (label_loc, DECL_EXPR, label)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } pedwarn (label_loc, OPT_Wpedantic, "ISO C forbids label declarations"); } /* We must now have at least one statement, label or declaration. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); c_parser_consume_token (parser); return; } while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE)) { location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) { if (c_parser_next_token_is_keyword (parser, RID_CASE)) label_loc = c_parser_peek_2nd_token (parser)->location; else label_loc = c_parser_peek_token (parser)->location; last_label = true; last_stmt = false; mark_valid_location_for_stdc_pragma (false); c_parser_label (parser); } else if (!last_label && c_parser_next_tokens_start_declaration (parser)) { last_label = false; mark_valid_location_for_stdc_pragma (false); c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, vNULL); if (last_stmt) pedwarn_c90 (loc, 
OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else if (!last_label && c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { /* __extension__ can start a declaration, but is also an unary operator that can start an expression. Consume all but the last of a possible series of __extension__ to determine which. */ while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token (parser)->keyword == RID_EXTENSION)) c_parser_consume_token (parser); if (c_token_starts_declaration (c_parser_peek_2nd_token (parser))) { int ext; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); last_label = false; mark_valid_location_for_stdc_pragma (false); c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, vNULL); /* Following the old parser, __extension__ does not disable this diagnostic. */ restore_extension_diagnostics (ext); if (last_stmt) pedwarn_c90 (loc, OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else goto statement; } else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { /* External pragmas, and some omp pragmas, are not associated with regular c code, and so are not to be considered statements syntactically. This ensures that the user doesn't put them places that would turn into syntax errors if the directive were ignored. 
*/ if (c_parser_pragma (parser, pragma_compound)) last_label = false, last_stmt = true; } else if (c_parser_next_token_is (parser, CPP_EOF)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); return; } else if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { if (parser->in_if_block) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); error_at (loc, """expected %<}%> before %<else%>"); return; } else { error_at (loc, "%<else%> without a previous %<if%>"); c_parser_consume_token (parser); continue; } } else { statement: last_label = false; last_stmt = true; mark_valid_location_for_stdc_pragma (false); c_parser_statement_after_labels (parser); } parser->error = false; } if (last_label) error_at (label_loc, "label at end of compound statement"); c_parser_consume_token (parser); /* Restore the value we started with. */ mark_valid_location_for_stdc_pragma (save_valid_for_pragma); } /* Parse all consecutive labels. */ static void c_parser_all_labels (c_parser *parser) { while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); } /* Parse a label (C90 6.6.1, C99 6.8.1). label: identifier : attributes[opt] case constant-expression : default : GNU extensions: label: case constant-expression ... constant-expression : The use of attributes on labels is a GNU extension. The syntax in GNU C accepts any expressions without commas, non-constant expressions being rejected later. 
*/

static void
c_parser_label (c_parser *parser)
{
  location_t loc1 = c_parser_peek_token (parser)->location;
  tree label = NULL_TREE;
  if (c_parser_next_token_is_keyword (parser, RID_CASE))
    {
      tree exp1, exp2;
      c_parser_consume_token (parser);
      exp1 = c_parser_expr_no_commas (parser, NULL).value;
      if (c_parser_next_token_is (parser, CPP_COLON))
	{
	  c_parser_consume_token (parser);
	  label = do_case (loc1, exp1, NULL_TREE);
	}
      else if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* GNU case range: "case LO ... HI:".  */
	  c_parser_consume_token (parser);
	  exp2 = c_parser_expr_no_commas (parser, NULL).value;
	  if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    label = do_case (loc1, exp1, exp2);
	}
      else
	c_parser_error (parser, "expected %<:%> or %<...%>");
    }
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    {
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	label = do_case (loc1, NULL_TREE, NULL_TREE);
    }
  else
    {
      /* Ordinary identifier label, optionally followed by GNU
	 attributes.  The callers only dispatch here when the next two
	 tokens are NAME COLON, hence the assertions.  */
      tree name = c_parser_peek_token (parser)->value;
      tree tlab;
      tree attrs;
      location_t loc2 = c_parser_peek_token (parser)->location;
      gcc_assert (c_parser_next_token_is (parser, CPP_NAME));
      c_parser_consume_token (parser);
      gcc_assert (c_parser_next_token_is (parser, CPP_COLON));
      c_parser_consume_token (parser);
      attrs = c_parser_attributes (parser);
      tlab = define_label (loc2, name);
      if (tlab)
	{
	  decl_attributes (&tlab, attrs, 0);
	  label = add_stmt (build_stmt (loc1, LABEL_EXPR, tlab));
	}
    }
  if (label)
    {
      /* A label must be followed by a statement; diagnose a following
	 declaration but still parse it for error recovery.  */
      if (c_parser_next_tokens_start_declaration (parser))
	{
	  error_at (c_parser_peek_token (parser)->location,
		    "a label can only be part of a statement and "
		    "a declaration is not a statement");
	  c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false,
					 /*static_assert_ok*/ true,
					 /*empty_ok*/ true, /*nested*/ true,
					 /*start_attr_ok*/ true, NULL,
					 vNULL);
	}
    }
}

/* Parse a statement (C90 6.6, C99 6.8).
statement: labeled-statement compound-statement expression-statement selection-statement iteration-statement jump-statement labeled-statement: label statement expression-statement: expression[opt] ; selection-statement: if-statement switch-statement iteration-statement: while-statement do-statement for-statement jump-statement: goto identifier ; continue ; break ; return expression[opt] ; GNU extensions: statement: asm-statement jump-statement: goto * expression ; Objective-C: statement: objc-throw-statement objc-try-catch-statement objc-synchronized-statement objc-throw-statement: @throw expression ; @throw ; OpenACC: statement: openacc-construct openacc-construct: parallel-construct kernels-construct data-construct loop-construct parallel-construct: parallel-directive structured-block kernels-construct: kernels-directive structured-block data-construct: data-directive structured-block loop-construct: loop-directive structured-block OpenMP: statement: openmp-construct openmp-construct: parallel-construct for-construct simd-construct for-simd-construct sections-construct single-construct parallel-for-construct parallel-for-simd-construct parallel-sections-construct master-construct critical-construct atomic-construct ordered-construct parallel-construct: parallel-directive structured-block for-construct: for-directive iteration-statement simd-construct: simd-directive iteration-statements for-simd-construct: for-simd-directive iteration-statements sections-construct: sections-directive section-scope single-construct: single-directive structured-block parallel-for-construct: parallel-for-directive iteration-statement parallel-for-simd-construct: parallel-for-simd-directive iteration-statement parallel-sections-construct: parallel-sections-directive section-scope master-construct: master-directive structured-block critical-construct: critical-directive structured-block atomic-construct: atomic-directive expression-statement ordered-construct: ordered-directive 
   structured-block

   Transactional Memory:

   statement:
     transaction-statement
     transaction-cancel-statement
*/

static void
c_parser_statement (c_parser *parser)
{
  c_parser_all_labels (parser);
  c_parser_statement_after_labels (parser);
}

/* Parse a statement, other than a labeled statement.  */

static void
c_parser_statement_after_labels (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt = NULL_TREE;
  bool in_if_block = parser->in_if_block;
  /* Clear in_if_block while parsing this statement; restored at the
     end so only the immediate body of an if is affected.  */
  parser->in_if_block = false;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_OPEN_BRACE:
      add_stmt (c_parser_compound_statement (parser));
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_IF:
	  c_parser_if_statement (parser);
	  break;
	case RID_SWITCH:
	  c_parser_switch_statement (parser);
	  break;
	case RID_WHILE:
	  c_parser_while_statement (parser, false);
	  break;
	case RID_DO:
	  c_parser_do_statement (parser, false);
	  break;
	case RID_FOR:
	  c_parser_for_statement (parser, false);
	  break;
	case RID_CILK_FOR:
	  if (!flag_cilkplus)
	    {
	      error_at (c_parser_peek_token (parser)->location,
			"-fcilkplus must be enabled to use %<_Cilk_for%>");
	      c_parser_skip_to_end_of_block_or_statement (parser);
	    }
	  else
	    c_parser_cilk_for (parser, integer_zero_node);
	  break;
	case RID_CILK_SYNC:
	  c_parser_consume_token (parser);
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	  if (!flag_cilkplus)
	    error_at (loc, "-fcilkplus must be enabled to use %<_Cilk_sync%>");
	  else
	    add_stmt (build_cilk_sync ());
	  break;
	case RID_GOTO:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      stmt = c_finish_goto_label (loc,
					  c_parser_peek_token (parser)->value);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* GNU computed goto: "goto *expr;".  */
	      struct c_expr val;

	      c_parser_consume_token (parser);
	      val = c_parser_expression (parser);
	      if (check_no_cilk (val.value,
		   "Cilk array notation cannot be used as a computed goto expression",
		   "%<_Cilk_spawn%> statement cannot be used as a computed goto expression",
		   loc))
		val.value = error_mark_node;
	      val = convert_lvalue_to_rvalue (loc, val, false, true);
	      stmt = c_finish_goto_ptr (loc, val.value);
	    }
	  else
	    c_parser_error (parser, "expected identifier or %<*%>");
	  goto expect_semicolon;
	case RID_CONTINUE:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, &c_cont_label, false);
	  goto expect_semicolon;
	case RID_BREAK:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, &c_break_label, true);
	  goto expect_semicolon;
	case RID_RETURN:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      stmt = c_finish_return (loc, NULL_TREE, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      location_t xloc = c_parser_peek_token (parser)->location;
	      struct c_expr expr = c_parser_expression_conv (parser);
	      mark_exp_read (expr.value);
	      stmt = c_finish_return (xloc, expr.value, expr.original_type);
	      goto expect_semicolon;
	    }
	  break;
	case RID_ASM:
	  stmt = c_parser_asm_statement (parser);
	  break;
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  stmt = c_parser_transaction (parser,
	      c_parser_peek_token (parser)->keyword);
	  break;
	case RID_TRANSACTION_CANCEL:
	  stmt = c_parser_transaction_cancel (parser);
	  goto expect_semicolon;
	case RID_AT_THROW:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* "@throw;" -- rethrow the current exception.  */
	      stmt = objc_build_throw_stmt (loc, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      struct c_expr expr = c_parser_expression (parser);
	      expr = convert_lvalue_to_rvalue (loc, expr, false, false);
	      if (check_no_cilk (expr.value,
		 "Cilk array notation cannot be used for a throw expression",
		 "%<_Cilk_spawn%> statement cannot be used for a throw expression"))
		expr.value = error_mark_node;
	      else
		{
		  expr.value = c_fully_fold (expr.value, false, NULL);
		  stmt = objc_build_throw_stmt (loc, expr.value);
		}
	      goto expect_semicolon;
	    }
	  break;
	case RID_AT_TRY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_try_catch_finally_statement (parser);
	  break;
	case RID_AT_SYNCHRONIZED:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_synchronized_statement (parser);
	  break;
	default:
	  goto expr_stmt;
	}
      break;
    case CPP_SEMICOLON:
      c_parser_consume_token (parser);
      break;
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
      /* Avoid infinite loop in error recovery:
	 c_parser_skip_until_found stops at a closing nesting
	 delimiter without consuming it, but here we need to consume
	 it to proceed further.  */
      c_parser_error (parser, "expected statement");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_stmt);
      break;
    default:
    expr_stmt:
      stmt = c_finish_expr_stmt (loc, c_parser_expression_conv (parser).value);
    expect_semicolon:
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      break;
    }
  /* Two cases cannot and do not have line numbers associated: If stmt
     is degenerate, such as "2;", then stmt is an INTEGER_CST, which
     cannot hold line numbers.  But that's OK because the statement
     will either be changed to a MODIFY_EXPR during gimplification of
     the statement expr, or discarded.  If stmt was compound, but
     without new variables, we will have skipped the creation of a
     BIND and will have a bare STATEMENT_LIST.  But that's OK because
     (recursively) all of the component statements should already have
     line numbers assigned.  ??? Can we discard no-op statements
     earlier?  */
  if (CAN_HAVE_LOCATION_P (stmt)
      && EXPR_LOCATION (stmt) == UNKNOWN_LOCATION)
    SET_EXPR_LOCATION (stmt, loc);

  parser->in_if_block = in_if_block;
}

/* Parse the condition from an if, do, while or for statements.  The
   expression is converted to a truth value and fully folded; sequence
   point violations are diagnosed here when -Wsequence-point.  */

static tree
c_parser_condition (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree cond;
  cond = c_parser_expression_conv (parser).value;
  cond = c_objc_common_truthvalue_conversion (loc, cond);
  cond = c_fully_fold (cond, false, NULL);
  if (warn_sequence_point)
    verify_sequence_points (cond);
  return cond;
}

/* Parse a parenthesized condition from an if, do or while statement.
   condition:
     ( expression )
*/

static tree
c_parser_paren_condition (c_parser *parser)
{
  tree cond;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return error_mark_node;
  cond = c_parser_condition (parser);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return cond;
}

/* Parse a statement which is a block in C99.  */

static tree
c_parser_c99_block_statement (c_parser *parser)
{
  tree block = c_begin_compound_stmt (flag_isoc99);
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_statement (parser);
  return c_end_compound_stmt (loc, block, flag_isoc99);
}

/* Parse the body of an if statement.  This is just parsing a
   statement but (a) it is a block in C99, (b) we track whether the
   body is an if statement for the sake of -Wparentheses warnings, (c)
   we handle an empty body specially for the sake of -Wempty-body
   warnings, and (d) we call parser_compound_statement directly
   because c_parser_statement_after_labels resets
   parser->in_if_block.  */

static tree
c_parser_if_body (c_parser *parser, bool *if_p)
{
  tree block = c_begin_compound_stmt (flag_isoc99);
  location_t body_loc = c_parser_peek_token (parser)->location;
  c_parser_all_labels (parser);
  /* Report back to the caller whether the body is itself an if, for
     -Wparentheses ("ambiguous else") purposes.  */
  *if_p = c_parser_next_token_is_keyword (parser, RID_IF);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      add_stmt (build_empty_stmt (loc));
      c_parser_consume_token (parser);
      if (!c_parser_next_token_is_keyword (parser, RID_ELSE))
	warning_at (loc, OPT_Wempty_body,
		    "suggest braces around empty body in an %<if%> statement");
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    add_stmt (c_parser_compound_statement (parser));
  else
    c_parser_statement_after_labels (parser);
  return c_end_compound_stmt (body_loc, block, flag_isoc99);
}

/* Parse the else body of an if statement.  This is just parsing a
   statement but (a) it is a block in C99, (b) we handle an empty body
   specially for the sake of -Wempty-body warnings.
*/

static tree
c_parser_else_body (c_parser *parser)
{
  location_t else_loc = c_parser_peek_token (parser)->location;
  tree block = c_begin_compound_stmt (flag_isoc99);
  c_parser_all_labels (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      warning_at (loc,
		  OPT_Wempty_body,
		  "suggest braces around empty body in an %<else%> statement");
      add_stmt (build_empty_stmt (loc));
      c_parser_consume_token (parser);
    }
  else
    c_parser_statement_after_labels (parser);
  return c_end_compound_stmt (else_loc, block, flag_isoc99);
}

/* Parse an if statement (C90 6.6.4, C99 6.8.4).

   if-statement:
     if ( expression ) statement
     if ( expression ) statement else statement
*/

static void
c_parser_if_statement (c_parser *parser)
{
  tree block;
  location_t loc;
  tree cond;
  bool first_if = false;
  tree first_body, second_body;
  bool in_if_block;
  tree if_stmt;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  if (flag_cilkplus && contains_cilk_spawn_stmt (cond))
    {
      error_at (loc, "if statement cannot contain %<Cilk_spawn%>");
      cond = error_mark_node;
    }
  /* Set in_if_block only around the first body so a stray "else"
     inside it can be diagnosed as "expected } before else".  */
  in_if_block = parser->in_if_block;
  parser->in_if_block = true;
  first_body = c_parser_if_body (parser, &first_if);
  parser->in_if_block = in_if_block;
  if (c_parser_next_token_is_keyword (parser, RID_ELSE))
    {
      c_parser_consume_token (parser);
      second_body = c_parser_else_body (parser);
    }
  else
    second_body = NULL_TREE;
  c_finish_if_stmt (loc, cond, first_body, second_body, first_if);
  if_stmt = c_end_compound_stmt (loc, block, flag_isoc99);

  /* If the if statement contains array notations, then we expand them.  */
  if (flag_cilkplus && contains_array_notation_expr (if_stmt))
    if_stmt = fix_conditional_array_notations (if_stmt);
  add_stmt (if_stmt);
}

/* Parse a switch statement (C90 6.6.4, C99 6.8.4).
switch-statement: switch (expression) statement */ static void c_parser_switch_statement (c_parser *parser) { struct c_expr ce; tree block, expr, body, save_break; location_t switch_loc = c_parser_peek_token (parser)->location; location_t switch_cond_loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); bool explicit_cast_p = false; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { switch_cond_loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) explicit_cast_p = true; ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (switch_cond_loc, ce, true, false); expr = ce.value; /* ??? expr has no valid location? */ if (check_no_cilk (expr, "Cilk array notation cannot be used as a condition for switch statement", "%<_Cilk_spawn%> statement cannot be used as a condition for switch statement", switch_cond_loc)) expr = error_mark_node; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else { switch_cond_loc = UNKNOWN_LOCATION; expr = error_mark_node; } c_start_case (switch_loc, switch_cond_loc, expr, explicit_cast_p); save_break = c_break_label; c_break_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_case (body, ce.original_type); if (c_break_label) { location_t here = c_parser_peek_token (parser)->location; tree t = build1 (LABEL_EXPR, void_type_node, c_break_label); SET_EXPR_LOCATION (t, here); add_stmt (t); } c_break_label = save_break; add_stmt (c_end_compound_stmt (switch_loc, block, flag_isoc99)); } /* Parse a while statement (C90 6.6.5, C99 6.8.5). 
   while-statement:
     while (expression) statement
*/

static void
c_parser_while_statement (c_parser *parser, bool ivdep)
{
  tree block, cond, body, save_break, save_cont;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  if (check_no_cilk (cond,
	 "Cilk array notation cannot be used as a condition for while statement",
	 "%<_Cilk_spawn%> statement cannot be used as a condition for while statement"))
    cond = error_mark_node;
  /* When driven by the "GCC ivdep" pragma (IVDEP), annotate the
     condition so the vectorizer may ignore assumed dependences.  */
  if (ivdep && cond != error_mark_node)
    cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
		   annot_expr_ivdep_kind));
  /* Save and clear the break/continue labels so break/continue inside
     the body bind to this loop; restored after the body is parsed.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, true);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
  c_break_label = save_break;
  c_cont_label = save_cont;
}

/* Parse a do statement (C90 6.6.5, C99 6.8.5).
   do-statement:
     do statement while ( expression ) ;
*/

static void
c_parser_do_statement (c_parser *parser, bool ivdep)
{
  tree block, cond, body, save_break, save_cont, new_break, new_cont;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    warning_at (c_parser_peek_token (parser)->location,
		OPT_Wempty_body,
		"suggest braces around empty body in %<do%> statement");
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  /* Save and clear the break/continue labels so break/continue inside
     the body bind to this loop.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>");
  /* Restore the outer labels BEFORE parsing the condition: a break or
     continue appearing inside the controlling expression belongs to
     the enclosing construct, not to this do-while.  */
  new_break = c_break_label;
  c_break_label = save_break;
  new_cont = c_cont_label;
  c_cont_label = save_cont;
  cond = c_parser_paren_condition (parser);
  if (check_no_cilk (cond,
	 "Cilk array notation cannot be used as a condition for a do-while statement",
	 "%<_Cilk_spawn%> statement cannot be used as a condition for a do-while statement"))
    cond = error_mark_node;
  /* When driven by the "GCC ivdep" pragma (IVDEP), annotate the
     condition so the vectorizer may ignore assumed dependences.  */
  if (ivdep && cond != error_mark_node)
    cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
		   annot_expr_ivdep_kind));
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  c_finish_loop (loc, cond, NULL, body, new_break, new_cont, false);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
}

/* Parse a for statement (C90 6.6.5, C99 6.8.5).

   for-statement:
     for ( expression[opt] ; expression[opt] ; expression[opt] ) statement
     for ( nested-declaration expression[opt] ; expression[opt] ) statement

   The form with a declaration is new in C99.

   ??? In accordance with the old parser, the declaration may be a
   nested function, which is then rejected in check_for_loop_decls,
   but does it make any sense for this to be included in the grammar?
   Note in particular that the nested function does not include a
   trailing ';', whereas the "declaration" production includes one.
   Also, can we reject bad declarations earlier and cheaper than
   check_for_loop_decls?

   In Objective-C, there are two additional variants:

   foreach-statement:
     for ( expression in expression ) statement
     for ( declaration in expression ) statement

   This is inconsistent with C, because the second variant is allowed
   even if c99 is not enabled.

   The rest of the comment documents these Objective-C
   foreach-statement variants.

   Here is the canonical example of the first variant:

    for (object in array)    { do something with object }

   we call the first expression ("object") the "object_expression" and
   the second expression ("array") the "collection_expression".
   object_expression must be an lvalue of type "id" (a generic
   Objective-C object) because the loop works by assigning to
   object_expression the various objects from the
   collection_expression.  collection_expression must evaluate to
   something of type "id" which responds to the method
   countByEnumeratingWithState:objects:count:.

   The canonical example of the second variant is:

    for (id object in array)    { do something with object }

   which is completely equivalent to

    {
      id object;
      for (object in array)    { do something with object }
    }

   Note that initializing 'object' in some way (eg, "for ((object =
   xxx) in array) { do something with object }") is possibly
   technically valid, but completely pointless as 'object' will be
   assigned to something else as soon as the loop starts.  We should
   most likely reject it (TODO).

   The beginning of the Objective-C foreach-statement looks exactly
   like the beginning of the for-statement, and we can tell it is a
   foreach-statement only because the initial declaration or
   expression is terminated by 'in' instead of ';'.
*/

static void
c_parser_for_statement (c_parser *parser, bool ivdep)
{
  tree block, cond, incr, save_break, save_cont, body;
  /* The following are only used when parsing an ObjC foreach statement.  */
  tree object_expression;
  /* Silence the bogus uninitialized warning.  */
  tree collection_expression = NULL;
  location_t loc = c_parser_peek_token (parser)->location;
  location_t for_loc = c_parser_peek_token (parser)->location;
  bool is_foreach_statement = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR));
  c_parser_consume_token (parser);
  /* Open a compound statement in Objective-C as well, just in case this is
     as foreach expression.  */
  block = c_begin_compound_stmt (flag_isoc99 || c_dialect_objc ());
  cond = error_mark_node;
  incr = error_mark_node;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      /* Parse the initialization declaration or expression.  */
      object_expression = error_mark_node;
      parser->objc_could_be_foreach_context = c_dialect_objc ();
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* Empty initialization clause.  */
	  parser->objc_could_be_foreach_context = false;
	  c_parser_consume_token (parser);
	  c_finish_expr_stmt (loc, NULL_TREE);
	}
      else if (c_parser_next_tokens_start_declaration (parser))
	{
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 &object_expression, vNULL);
	  parser->objc_could_be_foreach_context = false;

	  if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      /* "for (decl in collection)" -- ObjC fast enumeration.  */
	      c_parser_consume_token (parser);
	      is_foreach_statement = true;
	      if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		c_parser_error (parser, "multiple iterating variables in fast enumeration");
	    }
	  else
	    check_for_loop_decls (for_loc, flag_isoc99);
	}
      else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declaration (c_parser_peek_2nd_token (parser)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, &object_expression, vNULL);
	      parser->objc_could_be_foreach_context = false;

	      restore_extension_diagnostics (ext);
	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  c_parser_consume_token (parser);
		  is_foreach_statement = true;
		  if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		    c_parser_error (parser, "multiple iterating variables in fast enumeration");
		}
	      else
		check_for_loop_decls (for_loc, flag_isoc99);
	    }
	  else
	    goto init_expr;
	}
      else
	{
	init_expr:
	  {
	    struct c_expr ce;
	    tree init_expression;
	    ce = c_parser_expression (parser);
	    /* In theory we could forbid _Cilk_spawn here, as the spec says "only
	       in top level statement", but it works just fine, so allow it.  */
	    init_expression = ce.value;
	    parser->objc_could_be_foreach_context = false;
	    if (c_parser_next_token_is_keyword (parser, RID_IN))
	      {
		/* "for (expr in collection)": EXPR must be an lvalue,
		   as the loop assigns successive objects to it.  */
		c_parser_consume_token (parser);
		is_foreach_statement = true;
		if (! lvalue_p (init_expression))
		  c_parser_error (parser, "invalid iterating variable in fast enumeration");
		object_expression = c_fully_fold (init_expression, false, NULL);
	      }
	    else
	      {
		ce = convert_lvalue_to_rvalue (loc, ce, true, false);
		init_expression = ce.value;
		c_finish_expr_stmt (loc, init_expression);
		c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	      }
	  }
	}
      /* Parse the loop condition.  In the case of a foreach
	 statement, there is no loop condition.  */
      gcc_assert (!parser->objc_could_be_foreach_context);
      if (!is_foreach_statement)
	{
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      if (ivdep)
		{
		  c_parser_error (parser, "missing loop condition in loop with "
				  "%<GCC ivdep%> pragma");
		  cond = error_mark_node;
		}
	      else
		{
		  c_parser_consume_token (parser);
		  cond = NULL_TREE;
		}
	    }
	  else
	    {
	      cond = c_parser_condition (parser);
	      if (check_no_cilk (cond,
		 "Cilk array notation cannot be used in a condition for a for-loop",
		 "%<_Cilk_spawn%> statement cannot be used in a condition for a for-loop"))
		cond = error_mark_node;
	      c_parser_skip_until_found (parser, CPP_SEMICOLON,
					 "expected %<;%>");
	    }
	  /* "GCC ivdep" annotation, as in c_parser_while_statement.  */
	  if (ivdep && cond != error_mark_node)
	    cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
			   build_int_cst (integer_type_node,
			   annot_expr_ivdep_kind));
	}
      /* Parse the increment expression (the third expression in a
	 for-statement).  In the case of a foreach-statement, this is
	 the expression that follows the 'in'.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  if (is_foreach_statement)
	    {
	      c_parser_error (parser, "missing collection in fast enumeration");
	      collection_expression = error_mark_node;
	    }
	  else
	    incr = c_process_expr_stmt (loc, NULL_TREE);
	}
      else
	{
	  if (is_foreach_statement)
	    collection_expression = c_fully_fold (c_parser_expression (parser).value,
						  false, NULL);
	  else
	    {
	      struct c_expr ce = c_parser_expression (parser);
	      ce = convert_lvalue_to_rvalue (loc, ce, true, false);
	      incr = c_process_expr_stmt (loc, ce.value);
	    }
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  /* Save and clear the break/continue labels so break/continue inside
     the body bind to this loop; restored after finishing the loop.  */
  save_break = c_break_label;
  c_break_label = NULL_TREE;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = c_parser_c99_block_statement (parser);
  if (is_foreach_statement)
    objc_finish_foreach_loop (loc, object_expression, collection_expression,
			      body, c_break_label, c_cont_label);
  else
    c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, true);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99 || c_dialect_objc ()));
  c_break_label = save_break;
  c_cont_label = save_cont;
}

/* Parse an asm statement, a GNU extension.  This is a full-blown asm
   statement with inputs, outputs, clobbers, and volatile tag
   allowed.

   asm-statement:
     asm type-qualifier[opt] ( asm-argument ) ;
     asm type-qualifier[opt] goto ( asm-goto-argument ) ;

   asm-argument:
     asm-string-literal
     asm-string-literal : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt]
     asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers[opt]

   asm-goto-argument:
     asm-string-literal : : asm-operands[opt] : asm-clobbers[opt] \
       : asm-goto-operands

   Qualifiers other than volatile are accepted in the syntax but
   warned for.  */

static tree
c_parser_asm_statement (c_parser *parser)
{
  tree quals, str, outputs, inputs, clobbers, labels, ret;
  bool simple, is_goto;
  location_t asm_loc = c_parser_peek_token (parser)->location;
  int section, nsections;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_keyword (parser, RID_VOLATILE))
    {
      quals = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else if (c_parser_next_token_is_keyword (parser, RID_CONST)
	   || c_parser_next_token_is_keyword (parser, RID_RESTRICT))
    {
      warning_at (c_parser_peek_token (parser)->location,
		  0,
		  "%E qualifier ignored on asm",
		  c_parser_peek_token (parser)->value);
      quals = NULL_TREE;
      c_parser_consume_token (parser);
    }
  else
    quals = NULL_TREE;

  is_goto = false;
  if (c_parser_next_token_is_keyword (parser, RID_GOTO))
    {
      c_parser_consume_token (parser);
      is_goto = true;
    }

  /* ??? Follow the C++ parser rather than using the
     lex_untranslated_string kludge.  */
  parser->lex_untranslated_string = true;
  ret = NULL;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    goto error;

  str = c_parser_asm_string_literal (parser);
  if (str == NULL_TREE)
    goto error_close_paren;

  simple = true;
  outputs = NULL_TREE;
  inputs = NULL_TREE;
  clobbers = NULL_TREE;
  labels = NULL_TREE;

  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
    goto done_asm;

  /* Parse each colon-delimited section of operands.  */
  nsections = 3 + is_goto;
  for (section = 0; section < nsections; ++section)
    {
      if (!c_parser_require (parser, CPP_COLON,
			     is_goto
			     ? "expected %<:%>"
			     : "expected %<:%> or %<)%>"))
	goto error_close_paren;

      /* Once past any colon, we're no longer a simple asm.  */
      simple = false;

      if ((!c_parser_next_token_is (parser, CPP_COLON)
	   && !c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	  || section == 3)
	switch (section)
	  {
	  case 0:
	    /* For asm goto, we don't allow output operands, but reserve
	       the slot for a future extension that does allow them.  */
	    if (!is_goto)
	      outputs = c_parser_asm_operands (parser);
	    break;
	  case 1:
	    inputs = c_parser_asm_operands (parser);
	    break;
	  case 2:
	    clobbers = c_parser_asm_clobbers (parser);
	    break;
	  case 3:
	    labels = c_parser_asm_goto_operands (parser);
	    break;
	  default:
	    gcc_unreachable ();
	  }

      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
	goto done_asm;
    }

 done_asm:
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      goto error;
    }

  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);

  ret = build_asm_stmt (quals, build_asm_expr (asm_loc, str, outputs, inputs,
					       clobbers, labels, simple));

 error:
  /* All exits, successful or not, come through here so the
     untranslated-string lexing mode is always switched back off.  */
  parser->lex_untranslated_string = false;
  return ret;

 error_close_paren:
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  goto error;
}

/* Parse asm operands, a GNU extension.
   asm-operands:
     asm-operand
     asm-operands , asm-operand

   asm-operand:
     asm-string-literal ( expression )
     [ identifier ] asm-string-literal ( expression )
*/

static tree
c_parser_asm_operands (c_parser *parser)
{
  tree list = NULL_TREE;
  while (true)
    {
      tree name, str;
      struct c_expr expr;
      if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
	{
	  /* Optional [ identifier ] symbolic operand name.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      tree id = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      name = build_string (IDENTIFIER_LENGTH (id),
				   IDENTIFIER_POINTER (id));
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL);
	      return NULL_TREE;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	}
      else
	name = NULL_TREE;
      str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      /* The parenthesized expression is an ordinary expression, so
	 switch off untranslated-string lexing around it.  */
      parser->lex_untranslated_string = false;
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->lex_untranslated_string = true;
	  return NULL_TREE;
	}
      expr = c_parser_expression (parser);
      mark_exp_read (expr.value);
      parser->lex_untranslated_string = true;
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL_TREE;
	}
      /* Each element is ((name, constraint-string), expression).  */
      list = chainon (list, build_tree_list (build_tree_list (name, str),
					     expr.value));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}

/* Parse asm clobbers, a GNU extension.

   asm-clobbers:
     asm-string-literal
     asm-clobbers , asm-string-literal
*/

static tree
c_parser_asm_clobbers (c_parser *parser)
{
  tree list = NULL_TREE;
  while (true)
    {
      tree str = c_parser_asm_string_literal (parser);
      if (str)
	list = tree_cons (NULL_TREE, str, list);
      else
	return NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}

/* Parse asm goto labels, a GNU extension.
   asm-goto-operands:
     identifier
     asm-goto-operands , identifier
*/

static tree
c_parser_asm_goto_operands (c_parser *parser)
{
  tree list = NULL_TREE;
  while (true)
    {
      tree name, label;
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  c_token *tok = c_parser_peek_token (parser);
	  name = tok->value;
	  label = lookup_label_for_goto (tok->location, name);
	  c_parser_consume_token (parser);
	  /* The label is a possible jump target of the asm, so mark
	     it used.  */
	  TREE_USED (label) = 1;
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  return NULL_TREE;
	}
      /* Record the spelling of the label as a STRING_CST purpose,
	 with the LABEL_DECL as the value.  */
      name = build_string (IDENTIFIER_LENGTH (name),
			   IDENTIFIER_POINTER (name));
      list = tree_cons (name, label, list);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	return nreverse (list);
    }
}

/* Parse an expression other than a compound expression; that is, an
   assignment expression (C90 6.3.16, C99 6.5.16).  If AFTER is not
   NULL then it is an Objective-C message expression which is the
   primary-expression starting the expression as an initializer.

   assignment-expression:
     conditional-expression
     unary-expression assignment-operator assignment-expression

   assignment-operator: one of
     = *= /= %= += -= <<= >>= &= ^= |=

   In GNU C we accept any conditional expression on the LHS and
   diagnose the invalid lvalue rather than producing a syntax
   error.
*/

static struct c_expr
c_parser_expr_no_commas (c_parser *parser, struct c_expr *after,
			 tree omp_atomic_lhs)
{
  struct c_expr lhs, rhs, ret;
  enum tree_code code;
  location_t op_location, exp_location;
  gcc_assert (!after || c_dialect_objc ());
  lhs = c_parser_conditional_expression (parser, after, omp_atomic_lhs);
  op_location = c_parser_peek_token (parser)->location;
  /* Map an assignment-operator token to the corresponding tree code;
     anything else means the conditional expression is the whole
     assignment-expression.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_EQ:
      code = NOP_EXPR;
      break;
    case CPP_MULT_EQ:
      code = MULT_EXPR;
      break;
    case CPP_DIV_EQ:
      code = TRUNC_DIV_EXPR;
      break;
    case CPP_MOD_EQ:
      code = TRUNC_MOD_EXPR;
      break;
    case CPP_PLUS_EQ:
      code = PLUS_EXPR;
      break;
    case CPP_MINUS_EQ:
      code = MINUS_EXPR;
      break;
    case CPP_LSHIFT_EQ:
      code = LSHIFT_EXPR;
      break;
    case CPP_RSHIFT_EQ:
      code = RSHIFT_EXPR;
      break;
    case CPP_AND_EQ:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR_EQ:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR_EQ:
      code = BIT_IOR_EXPR;
      break;
    default:
      return lhs;
    }
  c_parser_consume_token (parser);
  exp_location = c_parser_peek_token (parser)->location;
  /* Assignment is right-associative: recurse for the RHS.  */
  rhs = c_parser_expr_no_commas (parser, NULL);
  rhs = convert_lvalue_to_rvalue (exp_location, rhs, true, true);
  ret.value = build_modify_expr (op_location, lhs.value, lhs.original_type,
				 code, exp_location, rhs.value,
				 rhs.original_type);
  if (code == NOP_EXPR)
    ret.original_code = MODIFY_EXPR;
  else
    {
      /* For compound assignments, suppress follow-on warnings about
	 the result and report no original code.  */
      TREE_NO_WARNING (ret.value) = 1;
      ret.original_code = ERROR_MARK;
    }
  ret.original_type = NULL;
  return ret;
}

/* Parse a conditional expression (C90 6.3.15, C99 6.5.15).  If AFTER
   is not NULL then it is an Objective-C message expression which is
   the primary-expression starting the expression as an initializer.

   conditional-expression:
     logical-OR-expression
     logical-OR-expression ? expression : conditional-expression

   GNU extensions:

   conditional-expression:
     logical-OR-expression ?
     : conditional-expression
*/

static struct c_expr
c_parser_conditional_expression (c_parser *parser, struct c_expr *after,
				 tree omp_atomic_lhs)
{
  struct c_expr cond, exp1, exp2, ret;
  location_t cond_loc, colon_loc, middle_loc;

  gcc_assert (!after || c_dialect_objc ());

  cond = c_parser_binary_expression (parser, after, omp_atomic_lhs);

  if (c_parser_next_token_is_not (parser, CPP_QUERY))
    return cond;
  cond_loc = c_parser_peek_token (parser)->location;
  cond = convert_lvalue_to_rvalue (cond_loc, cond, true, true);
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* GNU "cond ? : exp2" extension: the omitted middle operand
	 reuses the (single-evaluation) condition.  */
      tree eptype = NULL_TREE;

      middle_loc = c_parser_peek_token (parser)->location;
      pedwarn (middle_loc, OPT_Wpedantic,
	       "ISO C forbids omitting the middle term of a ?: expression");
      warn_for_omitted_condop (middle_loc, cond.value);
      if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR)
	{
	  /* Strip excess precision here and re-wrap the saved value
	     below so the truth-value conversion sees the real
	     operand.  */
	  eptype = TREE_TYPE (cond.value);
	  cond.value = TREE_OPERAND (cond.value, 0);
	}
      /* Make sure first operand is calculated only once.  */
      exp1.value = c_save_expr (default_conversion (cond.value));
      if (eptype)
	exp1.value = build1 (EXCESS_PRECISION_EXPR, eptype, exp1.value);
      exp1.original_type = NULL;
      cond.value = c_objc_common_truthvalue_conversion (cond_loc, exp1.value);
      c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node;
    }
  else
    {
      cond.value
	= c_objc_common_truthvalue_conversion
	(cond_loc, default_conversion (cond.value));
      /* While parsing the middle operand, inhibit warnings if the
	 condition is constant false (that arm is not evaluated).  */
      c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node;
      exp1 = c_parser_expression_conv (parser);
      mark_exp_read (exp1.value);
      /* Switch the inhibition over to the other arm: net effect is
	 +1 for constant-true, back to 0 for constant-false.  */
      c_inhibit_evaluation_warnings +=
	((cond.value == truthvalue_true_node)
	 - (cond.value == truthvalue_false_node));
    }

  colon_loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    {
      c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  {
    location_t exp2_loc = c_parser_peek_token (parser)->location;
    exp2 = c_parser_conditional_expression (parser, NULL, NULL_TREE);
    exp2 = convert_lvalue_to_rvalue (exp2_loc, exp2, true, true);
  }
  c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
  ret.value = build_conditional_expr (colon_loc, cond.value,
				      cond.original_code == C_MAYBE_CONST_EXPR,
				      exp1.value, exp1.original_type,
				      exp2.value, exp2.original_type);
  ret.original_code = ERROR_MARK;
  if (exp1.value == error_mark_node || exp2.value == error_mark_node)
    ret.original_type = NULL;
  else
    {
      tree t1, t2;

      /* If both sides are enum type, the default conversion will have
	 made the type of the result be an integer type.  We want to
	 remember the enum types we started with.  */
      t1 = exp1.original_type ? exp1.original_type : TREE_TYPE (exp1.value);
      t2 = exp2.original_type ? exp2.original_type : TREE_TYPE (exp2.value);
      ret.original_type = ((t1 != error_mark_node
			    && t2 != error_mark_node
			    && (TYPE_MAIN_VARIANT (t1)
				== TYPE_MAIN_VARIANT (t2)))
			   ? t1
			   : NULL);
    }
  return ret;
}

/* Parse a binary expression; that is, a logical-OR-expression (C90
   6.3.5-6.3.14, C99 6.5.5-6.5.14).  If AFTER is not NULL then it is
   an Objective-C message expression which is the primary-expression
   starting the expression as an initializer.

   OMP_ATOMIC_LHS is NULL, unless parsing OpenMP #pragma omp atomic,
   when it should be the unfolded lhs.  In a valid OpenMP source,
   one of the operands of the toplevel binary expression must be equal
   to it.  In that case, just return a build2 created binary operation
   rather than result of parser_build_binary_op.

   multiplicative-expression:
     cast-expression
     multiplicative-expression * cast-expression
     multiplicative-expression / cast-expression
     multiplicative-expression % cast-expression

   additive-expression:
     multiplicative-expression
     additive-expression + multiplicative-expression
     additive-expression - multiplicative-expression

   shift-expression:
     additive-expression
     shift-expression << additive-expression
     shift-expression >> additive-expression

   relational-expression:
     shift-expression
     relational-expression < shift-expression
     relational-expression > shift-expression
     relational-expression <= shift-expression
     relational-expression >= shift-expression

   equality-expression:
     relational-expression
     equality-expression == relational-expression
     equality-expression != relational-expression

   AND-expression:
     equality-expression
     AND-expression & equality-expression

   exclusive-OR-expression:
     AND-expression
     exclusive-OR-expression ^ AND-expression

   inclusive-OR-expression:
     exclusive-OR-expression
     inclusive-OR-expression | exclusive-OR-expression

   logical-AND-expression:
     inclusive-OR-expression
     logical-AND-expression && inclusive-OR-expression

   logical-OR-expression:
     logical-AND-expression
     logical-OR-expression || logical-AND-expression
*/

static struct c_expr
c_parser_binary_expression (c_parser *parser, struct c_expr *after,
			    tree omp_atomic_lhs)
{
  /* A binary expression is parsed using operator-precedence parsing,
     with the operands
     being cast expressions.  All the binary operators are
     left-associative.  Thus a binary expression is of form:

     E0 op1 E1 op2 E2 ...

     which we represent on a stack.  On the stack, the precedence
     levels are strictly increasing.  When a new operator is
     encountered of higher precedence than that at the top of the
     stack, it is pushed; its LHS is the top expression, and its RHS
     is everything parsed until it is popped.  When a new operator is
     encountered with precedence less than or equal to that at the top
     of the stack, triples E[i-1] op[i] E[i] are popped and replaced
     by the result of the operation until the operator at the top of
     the stack has lower precedence than the new operator or there is
     only one element on the stack; then the top expression is the LHS
     of the new operator.  In the case of logical AND and OR
     expressions, we also need to adjust c_inhibit_evaluation_warnings
     as appropriate when the operators are pushed and popped.  */

  struct {
    /* The expression at this stack level.  */
    struct c_expr expr;
    /* The precedence of the operator on its left, PREC_NONE at the
       bottom of the stack.  */
    enum c_parser_prec prec;
    /* The operation on its left.  */
    enum tree_code op;
    /* The source location of this operation.  */
    location_t loc;
  } stack[NUM_PRECS];
  int sp;
  /* Location of the binary operator.  */
  location_t binary_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  /* POP reduces the top two stack entries with the operator between
     them, undoing any &&/|| warning inhibition that was added when
     that operator was pushed.  The special omp_atomic_lhs case hands
     back a raw build2 node as required by the OpenMP atomic rules.  */
#define POP \
  do { \
    switch (stack[sp].op) \
      { \
      case TRUTH_ANDIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_false_node); \
	break; \
      case TRUTH_ORIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_true_node); \
	break; \
      default: \
	break; \
      } \
    stack[sp - 1].expr \
      = convert_lvalue_to_rvalue (stack[sp - 1].loc, \
				  stack[sp - 1].expr, true, true); \
    stack[sp].expr \
      = convert_lvalue_to_rvalue (stack[sp].loc, \
				  stack[sp].expr, true, true); \
    if (__builtin_expect (omp_atomic_lhs != NULL_TREE, 0) && sp == 1 \
	&& c_parser_peek_token (parser)->type == CPP_SEMICOLON \
	&& ((1 << stack[sp].prec) \
	    & ((1 << PREC_BITOR) | (1 << PREC_BITXOR) | (1 << PREC_BITAND) \
	       | (1 << PREC_SHIFT) | (1 << PREC_ADD) | (1 << PREC_MULT))) \
	&& stack[sp].op != TRUNC_MOD_EXPR \
	&& stack[0].expr.value != error_mark_node \
	&& stack[1].expr.value != error_mark_node \
	&& (c_tree_equal (stack[0].expr.value, omp_atomic_lhs) \
	    || c_tree_equal (stack[1].expr.value, omp_atomic_lhs))) \
      stack[0].expr.value \
	= build2 (stack[1].op, TREE_TYPE (stack[0].expr.value), \
		  stack[0].expr.value, stack[1].expr.value); \
    else \
      stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc, \
						   stack[sp].op, \
						   stack[sp - 1].expr, \
						   stack[sp].expr); \
    sp--; \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  stack[0].loc = c_parser_peek_token (parser)->location;
  stack[0].expr = c_parser_cast_expression (parser, after);
  stack[0].prec = PREC_NONE;
  sp = 0;
  while (true)
    {
      enum c_parser_prec oprec;
      enum tree_code ocode;
      if (parser->error)
	goto out;
      /* Map the next token to a precedence level and tree code.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode =
LSHIFT_EXPR; break; case CPP_RSHIFT: oprec = PREC_SHIFT; ocode = RSHIFT_EXPR; break; case CPP_LESS: oprec = PREC_REL; ocode = LT_EXPR; break; case CPP_GREATER: oprec = PREC_REL; ocode = GT_EXPR; break; case CPP_LESS_EQ: oprec = PREC_REL; ocode = LE_EXPR; break; case CPP_GREATER_EQ: oprec = PREC_REL; ocode = GE_EXPR; break; case CPP_EQ_EQ: oprec = PREC_EQ; ocode = EQ_EXPR; break; case CPP_NOT_EQ: oprec = PREC_EQ; ocode = NE_EXPR; break; case CPP_AND: oprec = PREC_BITAND; ocode = BIT_AND_EXPR; break; case CPP_XOR: oprec = PREC_BITXOR; ocode = BIT_XOR_EXPR; break; case CPP_OR: oprec = PREC_BITOR; ocode = BIT_IOR_EXPR; break; case CPP_AND_AND: oprec = PREC_LOGAND; ocode = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: oprec = PREC_LOGOR; ocode = TRUTH_ORIF_EXPR; break; default: /* Not a binary operator, so end of the binary expression. */ goto out; } binary_loc = c_parser_peek_token (parser)->location; while (oprec <= stack[sp].prec) POP; c_parser_consume_token (parser); switch (ocode) { case TRUTH_ANDIF_EXPR: stack[sp].expr = convert_lvalue_to_rvalue (stack[sp].loc, stack[sp].expr, true, true); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion (stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_false_node); break; case TRUTH_ORIF_EXPR: stack[sp].expr = convert_lvalue_to_rvalue (stack[sp].loc, stack[sp].expr, true, true); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion (stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_true_node); break; default: break; } sp++; stack[sp].loc = binary_loc; stack[sp].expr = c_parser_cast_expression (parser, NULL); stack[sp].prec = oprec; stack[sp].op = ocode; stack[sp].loc = binary_loc; } out: while (sp > 0) POP; return stack[0].expr; #undef POP } /* Parse a cast expression (C90 6.3.4, C99 6.5.4). 
   If AFTER is not NULL then it is an Objective-C message expression
   which is the primary-expression starting the expression as an
   initializer.

   cast-expression:
     unary-expression
     ( type-name ) unary-expression
*/

static struct c_expr
c_parser_cast_expression (c_parser *parser, struct c_expr *after)
{
  location_t cast_loc = c_parser_peek_token (parser)->location;
  gcc_assert (!after || c_dialect_objc ());
  if (after)
    return c_parser_postfix_expression_after_primary (parser,
						      cast_loc, *after);
  /* If the expression begins with a parenthesized type name, it may
     be either a cast or a compound literal; we need to see whether
     the next character is '{' to tell the difference.  If not, it is
     an unary expression.  Full detection of unknown typenames here
     would require a 3-token lookahead.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      struct c_type_name *type_name;
      struct c_expr ret;
      struct c_expr expr;
      c_parser_consume_token (parser);
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}

      /* Save casted types in the function's used types hash table.  */
      used_types_insert (type_name->specs->type);

      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	/* ( type-name ) { ... } is a compound literal, not a cast.  */
	return c_parser_postfix_expression_after_paren_type (parser,
							     type_name,
							     cast_loc);
      {
	location_t expr_loc = c_parser_peek_token (parser)->location;
	expr = c_parser_cast_expression (parser, NULL);
	expr = convert_lvalue_to_rvalue (expr_loc, expr, true, true);
      }
      ret.value = c_cast_expr (cast_loc, type_name, expr.value);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  else
    return c_parser_unary_expression (parser);
}

/* Parse an unary expression (C90 6.3.3, C99 6.5.3).
   unary-expression:
     postfix-expression
     ++ unary-expression
     -- unary-expression
     unary-operator cast-expression
     sizeof unary-expression
     sizeof ( type-name )

   unary-operator: one of
     & * + - ~ !

   GNU extensions:

   unary-expression:
     __alignof__ unary-expression
     __alignof__ ( type-name )
     && identifier

   (C11 permits _Alignof with type names only.)

   unary-operator: one of
     __extension__ __real__ __imag__

   Transactional Memory:

   unary-expression:
     transaction-expression

   In addition, the GNU syntax treats ++ and -- as unary operators, so
   they may be applied to cast expressions with errors for non-lvalues
   given later.  */

static struct c_expr
c_parser_unary_expression (c_parser *parser)
{
  int ext;
  struct c_expr ret, op;
  location_t op_loc = c_parser_peek_token (parser)->location;
  location_t exp_loc;
  ret.original_code = ERROR_MARK;
  ret.original_type = NULL;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS_PLUS:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);

      /* If there is array notations in op, we expand them.  */
      if (flag_cilkplus && TREE_CODE (op.value) == ARRAY_NOTATION_REF)
	return fix_array_notation_expr (exp_loc, PREINCREMENT_EXPR, op);
      else
	{
	  op = default_function_array_read_conversion (exp_loc, op);
	  return parser_build_unary_op (op_loc, PREINCREMENT_EXPR, op);
	}
    case CPP_MINUS_MINUS:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);

      /* If there is array notations in op, we expand them.  */
      if (flag_cilkplus && TREE_CODE (op.value) == ARRAY_NOTATION_REF)
	return fix_array_notation_expr (exp_loc, PREDECREMENT_EXPR, op);
      else
	{
	  op = default_function_array_read_conversion (exp_loc, op);
	  return parser_build_unary_op (op_loc, PREDECREMENT_EXPR, op);
	}
    case CPP_AND:
      c_parser_consume_token (parser);
      op = c_parser_cast_expression (parser, NULL);
      mark_exp_read (op.value);
      return parser_build_unary_op (op_loc, ADDR_EXPR, op);
    case CPP_MULT:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      ret.value = build_indirect_ref (op_loc, op.value, RO_UNARY_STAR);
      return ret;
    case CPP_PLUS:
      if (!c_dialect_objc () && !in_system_header_at (input_location))
	warning_at (op_loc,
		    OPT_Wtraditional,
		    "traditional C rejects the unary plus operator");
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, CONVERT_EXPR, op);
    case CPP_MINUS:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, NEGATE_EXPR, op);
    case CPP_COMPL:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, BIT_NOT_EXPR, op);
    case CPP_NOT:
      c_parser_consume_token (parser);
      exp_loc = c_parser_peek_token (parser)->location;
      op = c_parser_cast_expression (parser, NULL);
      op = convert_lvalue_to_rvalue (exp_loc, op, true, true);
      return parser_build_unary_op (op_loc, TRUTH_NOT_EXPR, op);
    case CPP_AND_AND:
      /* Refer to the address of a label as a pointer.  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  ret.value = finish_label_address_expr
	    (c_parser_peek_token (parser)->value, op_loc);
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  ret.value = error_mark_node;
	}
      return ret;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_SIZEOF:
	  return c_parser_sizeof_expression (parser);
	case RID_ALIGNOF:
	  return c_parser_alignof_expression (parser);
	case RID_EXTENSION:
	  /* __extension__ temporarily disables pedantic/extension
	     diagnostics for its operand.  */
	  c_parser_consume_token (parser);
	  ext = disable_extension_diagnostics ();
	  ret = c_parser_cast_expression (parser, NULL);
	  restore_extension_diagnostics (ext);
	  return ret;
	case RID_REALPART:
	  c_parser_consume_token (parser);
	  exp_loc = c_parser_peek_token (parser)->location;
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (exp_loc, op);
	  return parser_build_unary_op (op_loc, REALPART_EXPR, op);
	case RID_IMAGPART:
	  c_parser_consume_token (parser);
	  exp_loc = c_parser_peek_token (parser)->location;
	  op = c_parser_cast_expression (parser, NULL);
	  op = default_function_array_conversion (exp_loc, op);
	  return parser_build_unary_op (op_loc, IMAGPART_EXPR, op);
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return c_parser_transaction_expression (parser,
	      c_parser_peek_token (parser)->keyword);
	default:
	  return c_parser_postfix_expression (parser);
	}
    default:
      return c_parser_postfix_expression (parser);
    }
}

/* Parse a sizeof expression.  */

static struct c_expr
c_parser_sizeof_expression (c_parser *parser)
{
  struct c_expr expr;
  location_t expr_loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF));
  c_parser_consume_token (parser);
  /* sizeof operands are not evaluated, so inhibit warnings while
     parsing them.  */
  c_inhibit_evaluation_warnings++;
  in_sizeof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either sizeof ( type-name ) or sizeof unary-expression
	 starting with a compound literal.
*/
      struct c_type_name *type_name;
      c_parser_consume_token (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  c_inhibit_evaluation_warnings--;
	  in_sizeof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* sizeof applied to a compound literal: fall through to the
	     expression case below.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       expr_loc);
	  goto sizeof_expr;
	}
      /* sizeof ( type-name ).  */
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      return c_expr_sizeof_type (expr_loc, type_name);
    }
  else
    {
      expr_loc = c_parser_peek_token (parser)->location;
      expr = c_parser_unary_expression (parser);
    sizeof_expr:
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      mark_exp_read (expr.value);
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error_at (expr_loc, "%<sizeof%> applied to a bit-field");
      return c_expr_sizeof_expr (expr_loc, expr);
    }
}

/* Parse an alignof expression.  */

static struct c_expr
c_parser_alignof_expression (c_parser *parser)
{
  struct c_expr expr;
  location_t loc = c_parser_peek_token (parser)->location;
  tree alignof_spelling = c_parser_peek_token (parser)->value;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
  bool is_c11_alignof = strcmp (IDENTIFIER_POINTER (alignof_spelling),
				"_Alignof") == 0;
  /* A diagnostic is not required for the use of this identifier in
     the implementation namespace; only diagnose it for the C11
     spelling because of existing code using the other spellings.
*/
  if (is_c11_alignof)
    {
      if (flag_isoc99)
	pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %qE",
		     alignof_spelling);
      else
	pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %qE",
		     alignof_spelling);
    }
  c_parser_consume_token (parser);
  /* alignof operands are not evaluated.  */
  c_inhibit_evaluation_warnings++;
  in_alignof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either __alignof__ ( type-name ) or __alignof__
	 unary-expression starting with a compound literal.  */
      location_t loc;
      struct c_type_name *type_name;
      struct c_expr ret;
      c_parser_consume_token (parser);
      loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  c_inhibit_evaluation_warnings--;
	  in_alignof--;
	  ret.value = error_mark_node;
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* alignof applied to a compound literal: treat as an
	     expression.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       loc);
	  goto alignof_expr;
	}
      /* alignof ( type-name ).  */
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      ret.value = c_sizeof_or_alignof_type (loc,
					    groktypename (type_name,
							  NULL, NULL),
					    false, is_c11_alignof, 1);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  else
    {
      struct c_expr ret;
      expr = c_parser_unary_expression (parser);
    alignof_expr:
      mark_exp_read (expr.value);
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      pedwarn (loc, OPT_Wpedantic, "ISO C does not allow %<%E (expression)%>",
	       alignof_spelling);
      ret.value = c_alignof_expr (loc, expr.value);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
}

/* Helper function to read arguments of builtins which are interfaces
   for the middle-end nodes like COMPLEX_EXPR, VEC_PERM_EXPR and
   others.  The name of the builtin is passed using BNAME parameter.
   Function returns true if there were no errors while parsing and
   stores the arguments in CEXPR_LIST.  */

static bool
c_parser_get_builtin_args (c_parser *parser, const char *bname,
			   vec<c_expr_t, va_gc> **ret_cexpr_list,
			   bool choose_expr_p)
{
  location_t loc = c_parser_peek_token (parser)->location;
  vec<c_expr_t, va_gc> *cexpr_list;
  c_expr_t expr;
  bool saved_force_folding_builtin_constant_p;

  *ret_cexpr_list = NULL;
  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
    {
      /* A builtin name not followed by ( can only be an attempt to
	 take its address, which is not allowed.  */
      error_at (loc, "cannot take address of %qs", bname);
      return false;
    }

  c_parser_consume_token (parser);

  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      /* Empty argument list: *RET_CEXPR_LIST stays NULL.  */
      c_parser_consume_token (parser);
      return true;
    }

  /* For __builtin_choose_expr the first argument must fold to a
     constant, so force constant folding while parsing it.  */
  saved_force_folding_builtin_constant_p
    = force_folding_builtin_constant_p;
  force_folding_builtin_constant_p |= choose_expr_p;
  expr = c_parser_expr_no_commas (parser, NULL);
  force_folding_builtin_constant_p
    = saved_force_folding_builtin_constant_p;
  vec_alloc (cexpr_list, 1);
  vec_safe_push (cexpr_list, expr);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      expr = c_parser_expr_no_commas (parser, NULL);
      vec_safe_push (cexpr_list, expr);
    }

  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    return false;

  *ret_cexpr_list = cexpr_list;
  return true;
}

/* This represents a single generic-association.  */

struct c_generic_association
{
  /* The location of the starting token of the type.  */
  location_t type_location;
  /* The association's type, or NULL_TREE for 'default'.  */
  tree type;
  /* The association's expression.  */
  struct c_expr expression;
};

/* Parse a generic-selection.  (C11 6.5.1.1).
   generic-selection:
     _Generic ( assignment-expression , generic-assoc-list )

   generic-assoc-list:
     generic-association
     generic-assoc-list , generic-association

   generic-association:
     type-name : assignment-expression
     default : assignment-expression
*/

static struct c_expr
c_parser_generic_selection (c_parser *parser)
{
  vec<c_generic_association> associations = vNULL;
  struct c_expr selector, error_expr;
  tree selector_type;
  struct c_generic_association matched_assoc;
  bool match_found = false;
  location_t generic_loc, selector_loc;

  error_expr.original_code = ERROR_MARK;
  error_expr.original_type = NULL;
  error_expr.value = error_mark_node;
  matched_assoc.type_location = UNKNOWN_LOCATION;
  matched_assoc.type = NULL_TREE;
  matched_assoc.expression = error_expr;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_GENERIC));
  generic_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  if (flag_isoc99)
    pedwarn_c99 (generic_loc, OPT_Wpedantic,
		 "ISO C99 does not support %<_Generic%>");
  else
    pedwarn_c99 (generic_loc, OPT_Wpedantic,
		 "ISO C90 does not support %<_Generic%>");

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return error_expr;

  /* The controlling expression of _Generic is not evaluated.  */
  c_inhibit_evaluation_warnings++;
  selector_loc = c_parser_peek_token (parser)->location;
  selector = c_parser_expr_no_commas (parser, NULL);
  selector = default_function_array_conversion (selector_loc, selector);
  c_inhibit_evaluation_warnings--;

  if (selector.value == error_mark_node)
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return selector;
    }
  selector_type = TREE_TYPE (selector.value);
  /* In ISO C terms, rvalues (including the controlling expression of
     _Generic) do not have qualified types.  */
  if (TREE_CODE (selector_type) != ARRAY_TYPE)
    selector_type = TYPE_MAIN_VARIANT (selector_type);
  /* In ISO C terms, _Noreturn is not part of the type of expressions
     such as &abort, but in GCC it is represented internally as a type
     qualifier.  */
  if (FUNCTION_POINTER_TYPE_P (selector_type)
      && TYPE_QUALS (TREE_TYPE (selector_type)) != TYPE_UNQUALIFIED)
    selector_type
      = build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (selector_type)));

  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      return error_expr;
    }

  while (1)
    {
      struct c_generic_association assoc, *iter;
      unsigned int ix;
      c_token *token = c_parser_peek_token (parser);

      assoc.type_location = token->location;
      if (token->type == CPP_KEYWORD && token->keyword == RID_DEFAULT)
	{
	  c_parser_consume_token (parser);
	  assoc.type = NULL_TREE;
	}
      else
	{
	  struct c_type_name *type_name;

	  type_name = c_parser_type_name (parser);
	  if (type_name == NULL)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      goto error_exit;
	    }
	  assoc.type = groktypename (type_name, NULL, NULL);
	  if (assoc.type == error_mark_node)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      goto error_exit;
	    }

	  if (TREE_CODE (assoc.type) == FUNCTION_TYPE)
	    error_at (assoc.type_location,
		      "%<_Generic%> association has function type");
	  else if (!COMPLETE_TYPE_P (assoc.type))
	    error_at (assoc.type_location,
		      "%<_Generic%> association has incomplete type");

	  if (variably_modified_type_p (assoc.type, NULL_TREE))
	    error_at (assoc.type_location,
		      "%<_Generic%> association has "
		      "variable length type");
	}

      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  goto error_exit;
	}

      assoc.expression = c_parser_expr_no_commas (parser, NULL);
      if (assoc.expression.value == error_mark_node)
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  goto error_exit;
	}

      /* Diagnose duplicate defaults and mutually compatible types
	 among the associations seen so far.  */
      for (ix = 0; associations.iterate (ix, &iter); ++ix)
	{
	  if (assoc.type == NULL_TREE)
	    {
	      if (iter->type == NULL_TREE)
		{
		  error_at (assoc.type_location,
			    "duplicate %<default%> case in %<_Generic%>");
		  inform (iter->type_location, "original %<default%> is here");
		}
	    }
	  else if (iter->type != NULL_TREE)
	    {
	      if (comptypes
(assoc.type, iter->type)) { error_at (assoc.type_location, "%<_Generic%> specifies two compatible types"); inform (iter->type_location, "compatible type is here"); } } } if (assoc.type == NULL_TREE) { if (!match_found) { matched_assoc = assoc; match_found = true; } } else if (comptypes (assoc.type, selector_type)) { if (!match_found || matched_assoc.type == NULL_TREE) { matched_assoc = assoc; match_found = true; } else { error_at (assoc.type_location, "%<_Generic> selector matches multiple associations"); inform (matched_assoc.type_location, "other match is here"); } } associations.safe_push (assoc); if (c_parser_peek_token (parser)->type != CPP_COMMA) break; c_parser_consume_token (parser); } associations.release (); if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return error_expr; } if (!match_found) { error_at (selector_loc, "%<_Generic%> selector of type %qT is not " "compatible with any association", selector_type); return error_expr; } return matched_assoc.expression; error_exit: associations.release (); return error_expr; } /* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2). postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( argument-expression-list[opt] ) postfix-expression . 
     identifier
     postfix-expression -> identifier
     postfix-expression ++
     postfix-expression --
     ( type-name ) { initializer-list }
     ( type-name ) { initializer-list , }

   argument-expression-list:
     argument-expression
     argument-expression-list , argument-expression

   primary-expression:
     identifier
     constant
     string-literal
     ( expression )
     generic-selection

   GNU extensions:

   primary-expression:
     __func__
       (treated as a keyword in GNU C)
     __FUNCTION__
     __PRETTY_FUNCTION__
     ( compound-statement )
     __builtin_va_arg ( assignment-expression , type-name )
     __builtin_offsetof ( type-name , offsetof-member-designator )
     __builtin_choose_expr ( assignment-expression ,
			     assignment-expression ,
			     assignment-expression )
     __builtin_types_compatible_p ( type-name , type-name )
     __builtin_complex ( assignment-expression , assignment-expression )
     __builtin_shuffle ( assignment-expression , assignment-expression )
     __builtin_shuffle ( assignment-expression ,
			 assignment-expression ,
			 assignment-expression, )

   offsetof-member-designator:
     identifier
     offsetof-member-designator . identifier
     offsetof-member-designator [ expression ]

   Objective-C:

   primary-expression:
     [ objc-receiver objc-message-args ]
     @selector ( objc-selector-arg )
     @protocol ( identifier )
     @encode ( type-name )
     objc-string-literal
     Classname . identifier  */

/* Parse the primary expression at the current token, then hand off to
   c_parser_postfix_expression_after_primary to consume any trailing
   postfix operators.  Dispatches on the first token's type; on any
   parse error the result value is error_mark_node.  */

static struct c_expr
c_parser_postfix_expression (c_parser *parser)
{
  struct c_expr expr, e1;
  struct c_type_name *t1, *t2;
  location_t loc = c_parser_peek_token (parser)->location;;
  expr.original_code = ERROR_MARK;
  expr.original_type = NULL;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_NUMBER:
      expr.value = c_parser_peek_token (parser)->value;
      loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      if (TREE_CODE (expr.value) == FIXED_CST
	  && !targetm.fixed_point_supported_p ())
	{
	  error_at (loc, "fixed-point types not supported for this target");
	  expr.value = error_mark_node;
	}
      break;
    case CPP_CHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
    case CPP_WCHAR:
      expr.value = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      break;
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      expr.value = c_parser_peek_token (parser)->value;
      /* Remember it was a string literal (affects later diagnostics
	 and initialization handling).  */
      expr.original_code = STRING_CST;
      c_parser_consume_token (parser);
      break;
    case CPP_OBJC_STRING:
      gcc_assert (c_dialect_objc ());
      expr.value
	= objc_build_string_object (c_parser_peek_token (parser)->value);
      c_parser_consume_token (parser);
      break;
    case CPP_NAME:
      switch (c_parser_peek_token (parser)->id_kind)
	{
	case C_ID_ID:
	  {
	    tree id = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    /* The third argument tells build_external_ref whether the
	       name is used in function-call position.  */
	    expr.value = build_external_ref (loc, id,
					     (c_parser_peek_token (parser)->type
					      == CPP_OPEN_PAREN),
					     &expr.original_type);
	    break;
	  }
	case C_ID_CLASSNAME:
	  {
	    /* Here we parse the Objective-C 2.0 Class.name dot
	       syntax.  */
	    tree class_name = c_parser_peek_token (parser)->value;
	    tree component;
	    c_parser_consume_token (parser);
	    gcc_assert (c_dialect_objc ());
	    if (!c_parser_require (parser, CPP_DOT, "expected %<.%>"))
	      {
		expr.value = error_mark_node;
		break;
	      }
	    if (c_parser_next_token_is_not (parser, CPP_NAME))
	      {
		c_parser_error (parser, "expected identifier");
		expr.value = error_mark_node;
		break;
	      }
	    component = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    expr.value = objc_build_class_component_ref (class_name,
							 component);
	    break;
	  }
	default:
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  break;
	}
      break;
    case CPP_OPEN_PAREN:
      /* A parenthesized expression, statement expression or compound
	 literal.  */
      if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE)
	{
	  /* A statement expression.  */
	  tree stmt;
	  location_t brace_loc;
	  c_parser_consume_token (parser);
	  brace_loc = c_parser_peek_token (parser)->location;
	  c_parser_consume_token (parser);
	  if (!building_stmt_list_p ())
	    {
	      error_at (loc, "braced-group within expression allowed "
			"only inside a function");
	      parser->error = true;
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  stmt = c_begin_stmt_expr ();
	  c_parser_compound_statement_nostart (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  pedwarn (loc, OPT_Wpedantic,
		   "ISO C forbids braced-groups within expressions");
	  expr.value = c_finish_stmt_expr (brace_loc, stmt);
	  mark_exp_read (expr.value);
	}
      else if (c_token_starts_typename (c_parser_peek_2nd_token (parser)))
	{
	  /* A compound literal.  ??? Can we actually get here rather
	     than going directly to
	     c_parser_postfix_expression_after_paren_type from
	     elsewhere?  */
	  location_t loc;
	  struct c_type_name *type_name;
	  c_parser_consume_token (parser);
	  loc = c_parser_peek_token (parser)->location;
	  type_name = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (type_name == NULL)
	    {
	      expr.value = error_mark_node;
	    }
	  else
	    expr = c_parser_postfix_expression_after_paren_type (parser,
								 type_name,
								 loc);
	}
      else
	{
	  /* A parenthesized expression.  */
	  c_parser_consume_token (parser);
	  expr = c_parser_expression (parser);
	  if (TREE_CODE (expr.value) == MODIFY_EXPR)
	    TREE_NO_WARNING (expr.value) = 1;
	  if (expr.original_code != C_MAYBE_CONST_EXPR)
	    expr.original_code = ERROR_MARK;
	  /* Don't change EXPR.ORIGINAL_TYPE.  */
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	}
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_FUNCTION_NAME:
	  pedwarn (loc, OPT_Wpedantic, "ISO C does not support "
		   "%<__FUNCTION__%> predefined identifier");
	  expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword,
				   c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_PRETTY_FUNCTION_NAME:
	  pedwarn (loc, OPT_Wpedantic, "ISO C does not support "
		   "%<__PRETTY_FUNCTION__%> predefined identifier");
	  expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword,
				   c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_C99_FUNCTION_NAME:
	  pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support "
		       "%<__func__%> predefined identifier");
	  expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword,
				   c_parser_peek_token (parser)->value);
	  c_parser_consume_token (parser);
	  break;
	case RID_VA_ARG:
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  e1 = c_parser_expr_no_commas (parser, NULL);
	  mark_exp_read (e1.value);
	  e1.value = c_fully_fold (e1.value, false, NULL);
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  loc = c_parser_peek_token (parser)->location;
	  t1 = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	    }
	  else
	    {
	      tree type_expr = NULL_TREE;
	      expr.value = c_build_va_arg (loc, e1.value,
					   groktypename (t1, &type_expr,
							 NULL));
	      if (type_expr)
		{
		  /* Wrap so any side effects of a variably modified
		     type expression are preserved.  */
		  expr.value = build2 (C_MAYBE_CONST_EXPR,
				       TREE_TYPE (expr.value), type_expr,
				       expr.value);
		  C_MAYBE_CONST_EXPR_NON_CONST (expr.value) = true;
		}
	    }
	  break;
	case RID_OFFSETOF:
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    parser->error = true;
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    gcc_assert (parser->error);
	  if (parser->error)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  {
	    tree type = groktypename (t1, NULL, NULL);
	    tree offsetof_ref;
	    if (type == error_mark_node)
	      offsetof_ref = error_mark_node;
	    else
	      {
		/* Build *(type *)0 as the base of the member
		   designator.  */
		offsetof_ref = build1 (INDIRECT_REF, type,
				       null_pointer_node);
		SET_EXPR_LOCATION (offsetof_ref, loc);
	      }
	    /* Parse the second argument to __builtin_offsetof.  We
	       must have one identifier, and beyond that we want to
	       accept sub structure and sub array references.  */
	    if (c_parser_next_token_is (parser, CPP_NAME))
	      {
		offsetof_ref = build_component_ref
		  (loc, offsetof_ref, c_parser_peek_token (parser)->value);
		c_parser_consume_token (parser);
		while (c_parser_next_token_is (parser, CPP_DOT)
		       || c_parser_next_token_is (parser,
						  CPP_OPEN_SQUARE)
		       || c_parser_next_token_is (parser,
						  CPP_DEREF))
		  {
		    if (c_parser_next_token_is (parser, CPP_DEREF))
		      {
			/* a->b is treated as a[0].b.  */
			loc = c_parser_peek_token (parser)->location;
			offsetof_ref = build_array_ref (loc,
							offsetof_ref,
							integer_zero_node);
			goto do_dot;
		      }
		    else if (c_parser_next_token_is (parser, CPP_DOT))
		      {
		      do_dot:
			c_parser_consume_token (parser);
			if (c_parser_next_token_is_not (parser,
							CPP_NAME))
			  {
			    c_parser_error (parser, "expected identifier");
			    break;
			  }
			offsetof_ref = build_component_ref
			  (loc, offsetof_ref,
			   c_parser_peek_token (parser)->value);
			c_parser_consume_token (parser);
		      }
		    else
		      {
			struct c_expr ce;
			tree idx;
			loc = c_parser_peek_token (parser)->location;
			c_parser_consume_token (parser);
			ce = c_parser_expression (parser);
			ce = convert_lvalue_to_rvalue (loc, ce, false, false);
			idx = ce.value;
			idx = c_fully_fold (idx, false, NULL);
			c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
						   "expected %<]%>");
			offsetof_ref = build_array_ref (loc, offsetof_ref,
							idx);
		      }
		  }
	      }
	    else
	      c_parser_error (parser, "expected identifier");
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = fold_offsetof (offsetof_ref);
	  }
	  break;
	case RID_CHOOSE_EXPR:
	  {
	    vec<c_expr_t, va_gc> *cexpr_list;
	    c_expr_t *e1_p, *e2_p, *e3_p;
	    tree c;
	    c_parser_consume_token (parser);
	    if (!c_parser_get_builtin_args (parser,
					    "__builtin_choose_expr",
					    &cexpr_list, true))
	      {
		expr.value = error_mark_node;
		break;
	      }
	    if (vec_safe_length (cexpr_list) != 3)
	      {
		error_at (loc, "wrong number of arguments to "
			       "%<__builtin_choose_expr%>");
		expr.value = error_mark_node;
		break;
	      }
	    e1_p = &(*cexpr_list)[0];
	    e2_p = &(*cexpr_list)[1];
	    e3_p = &(*cexpr_list)[2];
	    c = e1_p->value;
	    mark_exp_read (e2_p->value);
	    mark_exp_read (e3_p->value);
	    if (TREE_CODE (c) != INTEGER_CST
		|| !INTEGRAL_TYPE_P (TREE_TYPE (c)))
	      error_at (loc,
			"first argument to %<__builtin_choose_expr%> not"
			" a constant");
	    constant_expression_warning (c);
	    /* The unselected operand is never evaluated.  */
	    expr = integer_zerop (c) ? *e3_p : *e2_p;
	    break;
	  }
	case RID_TYPES_COMPATIBLE_P:
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  t2 = c_parser_type_name (parser);
	  if (t2 == NULL)
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree e1, e2;
	    e1 = groktypename (t1, NULL, NULL);
	    e2 = groktypename (t2, NULL, NULL);
	    if (e1 == error_mark_node || e2 == error_mark_node)
	      {
		expr.value = error_mark_node;
		break;
	      }
	    /* Qualifiers are ignored for the compatibility check.  */
	    e1 = TYPE_MAIN_VARIANT (e1);
	    e2 = TYPE_MAIN_VARIANT (e2);
	    expr.value
	      = comptypes (e1, e2) ? integer_one_node : integer_zero_node;
	  }
	  break;
	case RID_BUILTIN_CALL_WITH_STATIC_CHAIN:
	  {
	    vec<c_expr_t, va_gc> *cexpr_list;
	    c_expr_t *e2_p;
	    tree chain_value;

	    c_parser_consume_token (parser);
	    if (!c_parser_get_builtin_args (parser,
					    "__builtin_call_with_static_chain",
					    &cexpr_list, false))
	      {
		expr.value = error_mark_node;
		break;
	      }
	    if (vec_safe_length (cexpr_list) != 2)
	      {
		error_at (loc, "wrong number of arguments to "
			       "%<__builtin_call_with_static_chain%>");
		expr.value = error_mark_node;
		break;
	      }
	    expr = (*cexpr_list)[0];
	    e2_p = &(*cexpr_list)[1];
	    *e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true);
	    chain_value = e2_p->value;
	    mark_exp_read (chain_value);
	    if (TREE_CODE (expr.value) != CALL_EXPR)
	      error_at (loc, "first argument to "
			"%<__builtin_call_with_static_chain%> "
			"must be a call expression");
	    else if (TREE_CODE (TREE_TYPE (chain_value)) != POINTER_TYPE)
	      error_at (loc, "second argument to "
			"%<__builtin_call_with_static_chain%> "
			"must be a pointer type");
	    else
	      CALL_EXPR_STATIC_CHAIN (expr.value) = chain_value;
	    break;
	  }
	case RID_BUILTIN_COMPLEX:
	  {
	    vec<c_expr_t, va_gc> *cexpr_list;
	    c_expr_t *e1_p, *e2_p;

	    c_parser_consume_token (parser);
	    if (!c_parser_get_builtin_args (parser,
					    "__builtin_complex",
					    &cexpr_list, false))
	      {
		expr.value = error_mark_node;
		break;
	      }
	    if (vec_safe_length (cexpr_list) != 2)
	      {
		error_at (loc, "wrong number of arguments to "
			       "%<__builtin_complex%>");
		expr.value = error_mark_node;
		break;
	      }
	    e1_p = &(*cexpr_list)[0];
	    e2_p = &(*cexpr_list)[1];
	    *e1_p = convert_lvalue_to_rvalue (loc, *e1_p, true, true);
	    if (TREE_CODE (e1_p->value) == EXCESS_PRECISION_EXPR)
	      e1_p->value = convert (TREE_TYPE (e1_p->value),
				     TREE_OPERAND (e1_p->value, 0));
	    *e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true);
	    if (TREE_CODE (e2_p->value) == EXCESS_PRECISION_EXPR)
	      e2_p->value = convert (TREE_TYPE (e2_p->value),
				     TREE_OPERAND (e2_p->value, 0));
	    /* Both operands must be real binary (non-decimal)
	       floating types.  */
	    if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (e1_p->value))
		|| DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e1_p->value))
		|| !SCALAR_FLOAT_TYPE_P (TREE_TYPE (e2_p->value))
		|| DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e2_p->value)))
	      {
		error_at (loc, "%<__builtin_complex%> operand "
			  "not of real binary floating-point type");
		expr.value = error_mark_node;
		break;
	      }
	    if (TYPE_MAIN_VARIANT (TREE_TYPE (e1_p->value))
		!= TYPE_MAIN_VARIANT (TREE_TYPE (e2_p->value)))
	      {
		error_at (loc,
			  "%<__builtin_complex%> operands of different types");
		expr.value = error_mark_node;
		break;
	      }
	    pedwarn_c90 (loc, OPT_Wpedantic,
			 "ISO C90 does not support complex types");
	    expr.value = build2 (COMPLEX_EXPR,
				 build_complex_type
				 (TYPE_MAIN_VARIANT
				  (TREE_TYPE (e1_p->value))),
				 e1_p->value, e2_p->value);
	    break;
	  }
	case RID_BUILTIN_SHUFFLE:
	  {
	    vec<c_expr_t, va_gc> *cexpr_list;
	    unsigned int i;
	    c_expr_t *p;

	    c_parser_consume_token (parser);
	    if (!c_parser_get_builtin_args (parser,
					    "__builtin_shuffle",
					    &cexpr_list, false))
	      {
		expr.value = error_mark_node;
		break;
	      }
	    FOR_EACH_VEC_SAFE_ELT (cexpr_list, i, p)
	      *p = convert_lvalue_to_rvalue (loc, *p, true, true);
	    /* Two-argument form uses a single input vector; the
	       three-argument form shuffles across two vectors.  */
	    if (vec_safe_length (cexpr_list) == 2)
	      expr.value = c_build_vec_perm_expr (loc,
						  (*cexpr_list)[0].value,
						  NULL_TREE,
						  (*cexpr_list)[1].value);
	    else if (vec_safe_length (cexpr_list) == 3)
	      expr.value = c_build_vec_perm_expr (loc,
						  (*cexpr_list)[0].value,
						  (*cexpr_list)[1].value,
						  (*cexpr_list)[2].value);
	    else
	      {
		error_at (loc, "wrong number of arguments to "
			       "%<__builtin_shuffle%>");
		expr.value = error_mark_node;
	      }
	    break;
	  }
	case RID_AT_SELECTOR:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  {
	    tree sel = c_parser_objc_selector_arg (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_selector_expr (loc, sel);
	  }
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  {
	    tree id = c_parser_peek_token (parser)->value;
	    c_parser_consume_token (parser);
	    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				       "expected %<)%>");
	    expr.value = objc_build_protocol_expr (id);
	  }
	  break;
	case RID_AT_ENCODE:
	  /* Extension to support C-structures in the archiver.  */
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    {
	      expr.value = error_mark_node;
	      break;
	    }
	  t1 = c_parser_type_name (parser);
	  if (t1 == NULL)
	    {
	      expr.value = error_mark_node;
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      break;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  {
	    tree type = groktypename (t1, NULL, NULL);
	    expr.value = objc_build_encode_expr (type);
	  }
	  break;
	case RID_GENERIC:
	  expr = c_parser_generic_selection (parser);
	  break;
	case RID_CILK_SPAWN:
	  c_parser_consume_token (parser);
	  if (!flag_cilkplus)
	    {
	      error_at (loc, "-fcilkplus must be enabled to use "
			"%<_Cilk_spawn%>");
	      /* Still parse the operand so we recover cleanly.  */
	      expr = c_parser_postfix_expression (parser);
	      expr.value = error_mark_node;
	    }
	  else if (c_parser_peek_token (parser)->keyword == RID_CILK_SPAWN)
	    {
	      error_at (loc, "consecutive %<_Cilk_spawn%> keywords "
			"are not permitted");
	      /* Now flush out all the _Cilk_spawns.  */
	      while (c_parser_peek_token (parser)->keyword == RID_CILK_SPAWN)
		c_parser_consume_token (parser);
	      expr = c_parser_postfix_expression (parser);
	    }
	  else
	    {
	      expr = c_parser_postfix_expression (parser);
	      expr.value = build_cilk_spawn (loc, expr.value);
	    }
	  break;
	default:
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  break;
	}
      break;
    case CPP_OPEN_SQUARE:
      if (c_dialect_objc ())
	{
	  tree receiver, args;
	  c_parser_consume_token (parser);
	  receiver = c_parser_objc_receiver (parser);
	  args = c_parser_objc_message_args (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  expr.value = objc_build_message_expr (receiver, args);
	  break;
	}
      /* Else fall through to report error.  */
    default:
      c_parser_error (parser, "expected expression");
      expr.value = error_mark_node;
      break;
    }
  /* Consume any postfix operators following the primary expression.  */
  return c_parser_postfix_expression_after_primary (parser, loc, expr);
}

/* Parse a postfix expression after a parenthesized type name: the
   brace-enclosed initializer of a compound literal, possibly followed
   by some postfix operators.  This is separate because it is not
   possible to tell until after the type name whether a cast
   expression has a cast or a compound literal, or whether the operand
   of sizeof is a parenthesized type name or starts with a compound
   literal.  TYPE_LOC is the location where TYPE_NAME starts--the
   location of the first token after the parentheses around the type
   name.
*/

static struct c_expr
c_parser_postfix_expression_after_paren_type (c_parser *parser,
					      struct c_type_name *type_name,
					      location_t type_loc)
{
  tree type;
  struct c_expr init;
  bool non_const;
  struct c_expr expr;
  location_t start_loc;
  tree type_expr = NULL_TREE;
  bool type_expr_const = true;
  check_compound_literal_type (type_loc, type_name);
  /* Initializer parsing is bracketed by start_init/finish_init.  */
  start_init (NULL_TREE, NULL, 0);
  type = groktypename (type_name, &type_expr, &type_expr_const);
  start_loc = c_parser_peek_token (parser)->location;
  if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type))
    {
      error_at (type_loc, "compound literal has variable size");
      type = error_mark_node;
    }
  init = c_parser_braced_init (parser, type, false, NULL);
  finish_init ();
  maybe_warn_string_init (type_loc, type, init);

  if (type != error_mark_node
      && !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type))
      && current_function_decl)
    {
      error ("compound literal qualified by address-space qualifier");
      type = error_mark_node;
    }

  pedwarn_c90 (start_loc, OPT_Wpedantic, "ISO C90 forbids compound literals");
  /* The literal is non-constant if its initializer is, or if the type
     involved a non-constant expression (e.g. a VLA typedef).  */
  non_const = ((init.value && TREE_CODE (init.value) == CONSTRUCTOR)
	       ? CONSTRUCTOR_NON_CONST (init.value)
	       : init.original_code == C_MAYBE_CONST_EXPR);
  non_const |= !type_expr_const;
  expr.value = build_compound_literal (start_loc, type, init.value,
				       non_const);
  expr.original_code = ERROR_MARK;
  expr.original_type = NULL;
  if (type_expr)
    {
      /* Preserve side effects of the type expression by attaching it
	 to (or wrapping the literal in) a C_MAYBE_CONST_EXPR.  */
      if (TREE_CODE (expr.value) == C_MAYBE_CONST_EXPR)
	{
	  gcc_assert (C_MAYBE_CONST_EXPR_PRE (expr.value) == NULL_TREE);
	  C_MAYBE_CONST_EXPR_PRE (expr.value) = type_expr;
	}
      else
	{
	  gcc_assert (!non_const);
	  expr.value = build2 (C_MAYBE_CONST_EXPR, type,
			       type_expr, expr.value);
	}
    }
  return c_parser_postfix_expression_after_primary (parser, start_loc, expr);
}

/* Callback function for sizeof_pointer_memaccess_warning to compare
   types.
*/

static bool
sizeof_ptr_memacc_comptypes (tree type1, tree type2)
{
  /* comptypes returns a tri-state; only an exact 1 counts as
     compatible here.  */
  return comptypes (type1, type2) == 1;
}

/* Parse a postfix expression after the initial primary or compound
   literal; that is, parse a series of postfix operators.

   EXPR_LOC is the location of the primary expression.  */

static struct c_expr
c_parser_postfix_expression_after_primary (c_parser *parser,
					   location_t expr_loc,
					   struct c_expr expr)
{
  struct c_expr orig_expr;
  tree ident, idx;
  location_t sizeof_arg_loc[3];
  tree sizeof_arg[3];
  unsigned int literal_zero_mask;
  unsigned int i;
  vec<tree, va_gc> *exprlist;
  vec<tree, va_gc> *origtypes = NULL;
  vec<location_t> arg_loc = vNULL;

  /* Keep consuming postfix operators until none remain.  */
  while (true)
    {
      location_t op_loc = c_parser_peek_token (parser)->location;
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_SQUARE:
	  /* Array reference.  */
	  c_parser_consume_token (parser);
	  if (flag_cilkplus
	      && c_parser_peek_token (parser)->type == CPP_COLON)
	    /* If we are here, then we have something like this:
	       Array [ : ]  */
	    expr.value = c_parser_array_notation (expr_loc, parser,
						  NULL_TREE, expr.value);
	  else
	    {
	      idx = c_parser_expression (parser).value;
	      /* Here we have 3 options:
		 1. Array [EXPR] -- Normal Array call.
		 2. Array [EXPR : EXPR] -- Array notation without stride.
		 3. Array [EXPR : EXPR : EXPR] -- Array notation with stride.

		 For 1, we just handle it just like a normal array
		 expression.  For 2 and 3 we handle it like we handle array
		 notations.  The idx value we have above becomes the
		 initial/start index.  */
	      if (flag_cilkplus
		  && c_parser_peek_token (parser)->type == CPP_COLON)
		expr.value = c_parser_array_notation (expr_loc, parser, idx,
						      expr.value);
	      else
		{
		  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					     "expected %<]%>");
		  expr.value = build_array_ref (op_loc, expr.value, idx);
		}
	    }
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_OPEN_PAREN:
	  /* Function call.  */
	  c_parser_consume_token (parser);
	  for (i = 0; i < 3; i++)
	    {
	      sizeof_arg[i] = NULL_TREE;
	      sizeof_arg_loc[i] = UNKNOWN_LOCATION;
	    }
	  literal_zero_mask = 0;
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    exprlist = NULL;
	  else
	    exprlist = c_parser_expr_list (parser, true, false, &origtypes,
					   sizeof_arg_loc, sizeof_arg,
					   &arg_loc, &literal_zero_mask);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  orig_expr = expr;
	  mark_exp_read (expr.value);
	  if (warn_sizeof_pointer_memaccess)
	    sizeof_pointer_memaccess_warning (sizeof_arg_loc, expr.value,
					      exprlist, sizeof_arg,
					      sizeof_ptr_memacc_comptypes);
	  /* Warn about memset (x, y, 0) where the length is a literal
	     zero but the fill value is not -- likely transposed args.  */
	  if (warn_memset_transposed_args
	      && TREE_CODE (expr.value) == FUNCTION_DECL
	      && DECL_BUILT_IN_CLASS (expr.value) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (expr.value) == BUILT_IN_MEMSET
	      && vec_safe_length (exprlist) == 3
	      && integer_zerop ((*exprlist)[2])
	      && (literal_zero_mask & (1 << 2)) != 0
	      && (!integer_zerop ((*exprlist)[1])
		  || (literal_zero_mask & (1 << 1)) == 0))
	    warning_at (expr_loc, OPT_Wmemset_transposed_args,
			"%<memset%> used with constant zero length parameter; "
			"this could be due to transposed parameters");

	  expr.value
	    = c_build_function_call_vec (expr_loc, arg_loc, expr.value,
					 exprlist, origtypes);
	  expr.original_code = ERROR_MARK;
	  /* A folded __builtin_constant_p call stays "maybe constant"
	     for later constant-expression checking.  */
	  if (TREE_CODE (expr.value) == INTEGER_CST
	      && TREE_CODE (orig_expr.value) == FUNCTION_DECL
	      && DECL_BUILT_IN_CLASS (orig_expr.value) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P)
	    expr.original_code = C_MAYBE_CONST_EXPR;
	  expr.original_type = NULL;
	  if (exprlist)
	    {
	      release_tree_vector (exprlist);
	      release_tree_vector (origtypes);
	    }
	  arg_loc.release ();
	  break;
	case CPP_DOT:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr_loc, expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (op_loc, expr.value, ident);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_DEREF:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = convert_lvalue_to_rvalue (expr_loc, expr, true, false);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    ident = c_parser_peek_token (parser)->value;
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.value = error_mark_node;
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (op_loc,
					    build_indirect_ref (op_loc,
								expr.value,
								RO_ARROW),
					    ident);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_PLUS_PLUS:
	  /* Postincrement.  */
	  c_parser_consume_token (parser);
	  /* If the expressions have array notations, we expand them.  */
	  if (flag_cilkplus
	      && TREE_CODE (expr.value) == ARRAY_NOTATION_REF)
	    expr = fix_array_notation_expr (expr_loc, POSTINCREMENT_EXPR,
					    expr);
	  else
	    {
	      expr = default_function_array_read_conversion (expr_loc, expr);
	      expr.value = build_unary_op (op_loc,
					   POSTINCREMENT_EXPR, expr.value, 0);
	    }
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_MINUS_MINUS:
	  /* Postdecrement.  */
	  c_parser_consume_token (parser);
	  /* If the expressions have array notations, we expand them.  */
	  if (flag_cilkplus
	      && TREE_CODE (expr.value) == ARRAY_NOTATION_REF)
	    expr = fix_array_notation_expr (expr_loc, POSTDECREMENT_EXPR,
					    expr);
	  else
	    {
	      expr = default_function_array_read_conversion (expr_loc, expr);
	      expr.value = build_unary_op (op_loc,
					   POSTDECREMENT_EXPR, expr.value, 0);
	    }
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	default:
	  return expr;
	}
    }
}

/* Parse an expression (C90 6.3.17, C99 6.5.17).

   expression:
     assignment-expression
     expression , assignment-expression  */

static struct c_expr
c_parser_expression (c_parser *parser)
{
  location_t tloc = c_parser_peek_token (parser)->location;
  struct c_expr expr;
  expr = c_parser_expr_no_commas (parser, NULL);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    expr = convert_lvalue_to_rvalue (tloc, expr, true, false);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      struct c_expr next;
      tree lhsval;
      location_t loc = c_parser_peek_token (parser)->location;
      location_t expr_loc;
      c_parser_consume_token (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      /* Mark the discarded left operand as read, so it does not
	 trigger a set-but-not-used warning.  */
      lhsval = expr.value;
      while (TREE_CODE (lhsval) == COMPOUND_EXPR)
	lhsval = TREE_OPERAND (lhsval, 1);
      if (DECL_P (lhsval) || handled_component_p (lhsval))
	mark_exp_read (lhsval);
      next = c_parser_expr_no_commas (parser, NULL);
      next = convert_lvalue_to_rvalue (expr_loc, next, true, false);
      expr.value = build_compound_expr (loc, expr.value, next.value);
      expr.original_code = COMPOUND_EXPR;
      expr.original_type = next.original_type;
    }
  return expr;
}

/* Parse an expression
   and convert functions or arrays to pointers and lvalues to
   rvalues.  */

static struct c_expr
c_parser_expression_conv (c_parser *parser)
{
  struct c_expr expr;
  location_t loc = c_parser_peek_token (parser)->location;
  expr = c_parser_expression (parser);
  expr = convert_lvalue_to_rvalue (loc, expr, true, false);
  return expr;
}

/* Helper function of c_parser_expr_list.  Check if IDXth (0 based)
   argument is a literal zero alone and if so, set it in
   literal_zero_mask.  */

static inline void
c_parser_check_literal_zero (c_parser *parser, unsigned *literal_zero_mask,
			     unsigned int idx)
{
  /* Only the first HOST_BITS_PER_INT arguments fit in the mask.  */
  if (idx >= HOST_BITS_PER_INT)
    return;

  c_token *tok = c_parser_peek_token (parser);
  switch (tok->type)
    {
    case CPP_NUMBER:
    case CPP_CHAR:
    case CPP_WCHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
      /* If a parameter is literal zero alone, remember it
	 for -Wmemset-transposed-args warning.  "Alone" means the next
	 token ends the argument (comma or close paren).  */
      if (integer_zerop (tok->value)
	  && !TREE_OVERFLOW (tok->value)
	  && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
	      || c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN))
	*literal_zero_mask |= 1U << idx;
    default:
      break;
    }
}

/* Parse a non-empty list of expressions.  If CONVERT_P, convert
   functions and arrays to pointers and lvalues to rvalues.  If
   FOLD_P, fold the expressions.  If LOCATIONS is non-NULL, save the
   locations of function arguments into this vector.
   nonempty-expr-list:
     assignment-expression
     nonempty-expr-list , assignment-expression  */

static vec<tree, va_gc> *
c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p,
		    vec<tree, va_gc> **p_orig_types,
		    location_t *sizeof_arg_loc, tree *sizeof_arg,
		    vec<location_t> *locations,
		    unsigned int *literal_zero_mask)
{
  vec<tree, va_gc> *ret;
  vec<tree, va_gc> *orig_types;
  struct c_expr expr;
  location_t loc = c_parser_peek_token (parser)->location;
  location_t cur_sizeof_arg_loc = UNKNOWN_LOCATION;
  unsigned int idx = 0;

  ret = make_tree_vector ();
  if (p_orig_types == NULL)
    orig_types = NULL;
  else
    orig_types = make_tree_vector ();

  /* First argument: record a sizeof operand location before parsing,
     since parsing consumes the tokens.  */
  if (sizeof_arg != NULL
      && c_parser_next_token_is_keyword (parser, RID_SIZEOF))
    cur_sizeof_arg_loc = c_parser_peek_2nd_token (parser)->location;
  if (literal_zero_mask)
    c_parser_check_literal_zero (parser, literal_zero_mask, 0);
  expr = c_parser_expr_no_commas (parser, NULL);
  if (convert_p)
    expr = convert_lvalue_to_rvalue (loc, expr, true, true);
  if (fold_p)
    expr.value = c_fully_fold (expr.value, false, NULL);
  ret->quick_push (expr.value);
  if (orig_types)
    orig_types->quick_push (expr.original_type);
  if (locations)
    locations->safe_push (loc);
  if (sizeof_arg != NULL
      && cur_sizeof_arg_loc != UNKNOWN_LOCATION
      && expr.original_code == SIZEOF_EXPR)
    {
      sizeof_arg[0] = c_last_sizeof_arg;
      sizeof_arg_loc[0] = cur_sizeof_arg_loc;
    }
  /* Remaining comma-separated arguments.  */
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      loc = c_parser_peek_token (parser)->location;
      if (sizeof_arg != NULL
	  && c_parser_next_token_is_keyword (parser, RID_SIZEOF))
	cur_sizeof_arg_loc = c_parser_peek_2nd_token (parser)->location;
      else
	cur_sizeof_arg_loc = UNKNOWN_LOCATION;
      if (literal_zero_mask)
	c_parser_check_literal_zero (parser, literal_zero_mask, idx + 1);
      expr = c_parser_expr_no_commas (parser, NULL);
      if (convert_p)
	expr = convert_lvalue_to_rvalue (loc, expr, true, true);
      if (fold_p)
	expr.value = c_fully_fold (expr.value, false, NULL);
      vec_safe_push (ret, expr.value);
      if (orig_types)
	vec_safe_push (orig_types, expr.original_type);
      if (locations)
	locations->safe_push (loc);
      /* sizeof tracking only covers the first three arguments (the
	 arity of the mem* functions being warned about).  */
      if (++idx < 3
	  && sizeof_arg != NULL
	  && cur_sizeof_arg_loc != UNKNOWN_LOCATION
	  && expr.original_code == SIZEOF_EXPR)
	{
	  sizeof_arg[idx] = c_last_sizeof_arg;
	  sizeof_arg_loc[idx] = cur_sizeof_arg_loc;
	}
    }
  if (orig_types)
    *p_orig_types = orig_types;
  return ret;
}

/* Parse Objective-C-specific constructs.  */

/* Parse an objc-class-definition.

   objc-class-definition:
     @interface identifier objc-superclass[opt] objc-protocol-refs[opt]
       objc-class-instance-variables[opt] objc-methodprotolist @end
     @implementation identifier objc-superclass[opt]
       objc-class-instance-variables[opt]
     @interface identifier ( identifier ) objc-protocol-refs[opt]
       objc-methodprotolist @end
     @interface identifier ( ) objc-protocol-refs[opt]
       objc-methodprotolist @end
     @implementation identifier ( identifier )

   objc-superclass:
     : identifier

   "@interface identifier (" must start "@interface identifier (
   identifier ) ...": objc-methodprotolist in the first production may
   not start with a parenthesized identifier as a declarator of a data
   definition with no declaration specifiers if the objc-superclass,
   objc-protocol-refs and objc-class-instance-variables are omitted.  */

static void
c_parser_objc_class_definition (c_parser *parser, tree attributes)
{
  bool iface_p;
  tree id1;
  tree superclass;
  if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE))
    iface_p = true;
  else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
    iface_p = false;
  else
    gcc_unreachable ();

  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* We have a category or class extension.  */
      tree id2;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  if (iface_p && c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      /* We have a class extension.  */
	      id2 = NULL_TREE;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier or %<)%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      return;
	    }
	}
      else
	{
	  id2 = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
      if (!iface_p)
	{
	  objc_start_category_implementation (id1, id2);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_category_interface (id1, id2, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
      return;
    }
  /* Plain class interface/implementation: optional superclass.  */
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return;
	}
      superclass = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    superclass = NULL_TREE;
  if (iface_p)
    {
      tree proto = NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_class_interface (id1, superclass, proto, attributes);
    }
  else
    objc_start_class_implementation (id1, superclass);
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    c_parser_objc_class_instance_variables (parser);
  if (iface_p)
    {
      objc_continue_interface ();
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
    }
  else
    {
      objc_continue_implementation ();
      return;
    }
}

/* Parse objc-class-instance-variables.
   objc-class-instance-variables:
     { objc-instance-variable-decl-list[opt] }

   objc-instance-variable-decl-list:
     objc-visibility-spec
     objc-instance-variable-decl ;
     ;
     objc-instance-variable-decl-list objc-visibility-spec
     objc-instance-variable-decl-list objc-instance-variable-decl ;
     objc-instance-variable-decl-list ;

   objc-visibility-spec:
     @private
     @protected
     @public

   objc-instance-variable-decl:
     struct-declaration  */

static void
c_parser_objc_class_instance_variables (c_parser *parser)
{
  /* Caller guarantees we are positioned on the opening brace.  */
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  while (c_parser_next_token_is_not (parser, CPP_EOF))
    {
      tree decls;
      /* Parse any stray semicolon.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
		   "extra semicolon");
	  c_parser_consume_token (parser);
	  continue;
	}
      /* Stop if at the end of the instance variables.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* Parse any objc-visibility-spec.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PACKAGE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
	  continue;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  c_parser_pragma (parser, pragma_external);
	  continue;
	}

      /* Parse some comma-separated declarations.  */
      decls = c_parser_struct_declaration (parser);
      if (decls == NULL)
	{
	  /* There is a syntax error.  We want to skip the offending
	     tokens up to the next ';' (included) or '}'
	     (excluded).  */

	  /* First, skip manually a ')' or ']'.  This is because they
	     reduce the nesting level, so c_parser_skip_until_found()
	     wouldn't be able to skip past them.  */
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE)
	    c_parser_consume_token (parser);

	  /* Then, do the standard skipping.  */
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);

	  /* We hopefully recovered.  Start normal parsing again.  */
	  parser->error = false;
	  continue;
	}
      else
	{
	  /* Comma-separated instance variables are chained together
	     in reverse order; add them one by one.  */
	  tree ivar = nreverse (decls);
	  for (; ivar; ivar = DECL_CHAIN (ivar))
	    objc_add_instance_variable (copy_node (ivar));
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
}

/* Parse an objc-class-declaration.

   objc-class-declaration:
     @class identifier-list ;  */

static void
c_parser_objc_class_declaration (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  while (true)
    {
      tree id;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Reset manually: skip_until_found does not clear the error
	     flag when the next token is already the semicolon.  */
	  parser->error = false;
	  return;
	}
      id = c_parser_peek_token (parser)->value;
      objc_declare_class (id);
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}

/* Parse an objc-alias-declaration.
   objc-alias-declaration:
     @compatibility_alias identifier identifier ;  */

static void
c_parser_objc_alias_declaration (c_parser *parser)
{
  tree id1, id2;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS));
  c_parser_consume_token (parser);
  /* Alias name.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  /* Class being aliased.  */
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  id2 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_alias (id1, id2);
}

/* Parse an objc-protocol-definition.

   objc-protocol-definition:
     @protocol identifier objc-protocol-refs[opt] objc-methodprotolist
       @end
     @protocol identifier-list ;

   "@protocol identifier ;" should be resolved as "@protocol
   identifier-list ;": objc-methodprotolist may not start with a
   semicolon in the first alternative if objc-protocol-refs are
   omitted.  */

static void
c_parser_objc_protocol_definition (c_parser *parser, tree attributes)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* A comma or semicolon after the first identifier means this is a
     forward protocol declaration list, not a protocol definition.  */
  if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
      || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON)
    {
      /* Any identifiers, including those declared as type names, are
	 OK here.  */
      while (true)
	{
	  tree id;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	  id = c_parser_peek_token (parser)->value;
	  objc_declare_protocol (id, attributes);
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
  else
    {
      /* Full protocol definition, terminated by @end.  */
      tree id = c_parser_peek_token (parser)->value;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* Protocol qualifiers (in/out/inout/...) are keywords inside a
	 protocol body.  */
      parser->objc_pq_context = true;
      objc_start_protocol (id, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      parser->objc_pq_context = false;
      objc_finish_interface ();
    }
}

/* Parse an objc-method-type.

   objc-method-type:
     +
     -

   Return true if it is a class method (+) and false if it is
   an instance method (-).  */

static inline bool
c_parser_objc_method_type (c_parser *parser)
{
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS:
      c_parser_consume_token (parser);
      return true;
    case CPP_MINUS:
      c_parser_consume_token (parser);
      return false;
    default:
      /* Callers only invoke this after seeing '+' or '-'.  */
      gcc_unreachable ();
    }
}

/* Parse an objc-method-definition.

   objc-method-definition:
     objc-method-type objc-method-decl ;[opt] compound-statement  */

static void
c_parser_objc_method_definition (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE, expr = NULL_TREE;
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes,
				    &expr);
  if (decl == error_mark_node)
    return;  /* Bail here.  */

  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_consume_token (parser);
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "extra semicolon in method definition specified");
    }

  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser, "expected %<{%>");
      return;
    }

  parser->objc_pq_context = false;
  if (objc_start_method_definition (is_class_method, decl, attributes, expr))
    {
      add_stmt (c_parser_compound_statement (parser));
      objc_finish_method_definition (current_function_decl);
    }
  else
    {
      /* This code is executed when we find a method definition
	 outside of an @implementation context (or invalid for other
	 reasons).  Parse the method (to keep going) but do not emit
	 any code.  */
      c_parser_compound_statement (parser);
    }
}

/* Parse an objc-methodprotolist.

   objc-methodprotolist:
     empty
     objc-methodprotolist objc-methodproto
     objc-methodprotolist declaration
     objc-methodprotolist ;
     @optional
     @required

   The declaration is a data definition, which may be missing
   declaration specifiers under the same rules and diagnostics as
   other data definitions outside functions, and the stray semicolon
   is diagnosed the same way as a stray semicolon outside a
   function.  */

static void
c_parser_objc_methodprotolist (c_parser *parser)
{
  while (true)
    {
      /* The list is terminated by @end.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_SEMICOLON:
	  pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
		   "ISO C does not allow extra %<;%> outside of a function");
	  c_parser_consume_token (parser);
	  break;
	case CPP_PLUS:
	case CPP_MINUS:
	  c_parser_objc_methodproto (parser);
	  break;
	case CPP_PRAGMA:
	  c_parser_pragma (parser, pragma_external);
	  break;
	case CPP_EOF:
	  return;
	default:
	  if (c_parser_next_token_is_keyword (parser, RID_AT_END))
	    return;
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY))
	    c_parser_objc_at_property_declaration (parser);
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_OPTIONAL))
	    {
	      objc_set_method_opt (true);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_REQUIRED))
	    {
	      objc_set_method_opt (false);
	      c_parser_consume_token (parser);
	    }
	  else
	    /* Ordinary data definition inside the interface.  */
	    c_parser_declaration_or_fndef (parser, false, false, true,
					   false, true, NULL, vNULL);
	  break;
	}
    }
}

/* Parse an objc-methodproto.

   objc-methodproto:
     objc-method-type objc-method-decl ;  */

static void
c_parser_objc_methodproto (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE;

  /* Remember protocol qualifiers in prototypes.  */
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes,
				    NULL);
  /* Forget protocol qualifiers now.  */
  parser->objc_pq_context = false;

  /* Do not allow the presence of attributes to hide an erroneous
     method implementation in the interface section.  */
  if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_error (parser, "expected %<;%>");
      return;
    }

  if (decl != error_mark_node)
    objc_add_method_declaration (is_class_method, decl, attributes);

  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}

/* If we are at a position that method attributes may be present, check
   that there are not any parsed already (a syntax error) and then
   collect any specified at the current location.
   Finally, if new attributes were present, check that the next token
   is legal ( ';' for decls and '{' for defs).  */

static bool
c_parser_objc_maybe_method_attributes (c_parser* parser, tree* attributes)
{
  bool bad = false;
  /* Attributes seen twice in one declaration is a syntax error;
     discard the earlier ones and remember the failure.  */
  if (*attributes)
    {
      c_parser_error (parser,
		    "method attributes must be specified at the end only");
      *attributes = NULL_TREE;
      bad = true;
    }

  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    *attributes = c_parser_attributes (parser);

  /* If there were no attributes here, just report any earlier error.  */
  if (*attributes == NULL_TREE || bad)
    return bad;

  /* If the attributes are followed by a ; or {, then just report any
     earlier error.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON)
      || c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    return bad;

  /* We've got attributes, but not at the end.  */
  c_parser_error (parser,
		"expected %<;%> or %<{%> after method attribute definition");
  return true;
}

/* Parse an objc-method-decl.

   objc-method-decl:
     ( objc-type-name ) objc-selector
     objc-selector
     ( objc-type-name ) objc-keyword-selector objc-optparmlist
     objc-keyword-selector objc-optparmlist
     attributes

   objc-keyword-selector:
     objc-keyword-decl
     objc-keyword-selector objc-keyword-decl

   objc-keyword-decl:
     objc-selector : ( objc-type-name ) identifier
     objc-selector : identifier
     : ( objc-type-name ) identifier
     : identifier

   objc-optparmlist:
     objc-optparms objc-optellipsis

   objc-optparms:
     empty
     objc-opt-parms , parameter-declaration

   objc-optellipsis:
     empty
     , ...
*/

static tree
c_parser_objc_method_decl (c_parser *parser, bool is_class_method,
			   tree *attributes, tree *expr)
{
  tree type = NULL_TREE;
  tree sel;
  tree parms = NULL_TREE;
  bool ellipsis = false;
  bool attr_err = false;

  *attributes = NULL_TREE;
  /* Optional parenthesized return type.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      type = c_parser_objc_type_name (parser);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  sel = c_parser_objc_selector (parser);
  /* If there is no selector, or a colon follows, we have an
     objc-keyword-selector.  If there is a selector, and a colon does
     not follow, that selector ends the objc-method-decl.  */
  if (!sel || c_parser_next_token_is (parser, CPP_COLON))
    {
      tree tsel = sel;
      tree list = NULL_TREE;
      while (true)
	{
	  tree atype = NULL_TREE, id, keyworddecl;
	  tree param_attr = NULL_TREE;
	  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    break;
	  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	    {
	      c_parser_consume_token (parser);
	      atype = c_parser_objc_type_name (parser);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	    }
	  /* New ObjC allows attributes on method parameters.  */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    param_attr = c_parser_attributes (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      return error_mark_node;
	    }
	  id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  keyworddecl = objc_build_keyword_decl (tsel, atype, id, param_attr);
	  list = chainon (list, keyworddecl);
	  tsel = c_parser_objc_selector (parser);
	  if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON))
	    break;
	}

      attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;

      /* Parse the optional parameter list.  Optional Objective-C
	 method parameters follow the C syntax, and may include '...'
	 to denote a variable number of arguments.  */
      parms = make_node (TREE_LIST);
      while (c_parser_next_token_is (parser, CPP_COMMA))
	{
	  struct c_parm *parm;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	    {
	      ellipsis = true;
	      c_parser_consume_token (parser);
	      attr_err |= c_parser_objc_maybe_method_attributes
						(parser, attributes) ;
	      break;
	    }
	  parm = c_parser_parameter_declaration (parser, NULL_TREE);
	  if (parm == NULL)
	    break;
	  parms = chainon (parms,
			   build_tree_list (NULL_TREE, grokparm (parm, expr)));
	}
      sel = list;
    }
  else
    attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;

  if (sel == NULL)
    {
      c_parser_error (parser, "objective-c method declaration is expected");
      return error_mark_node;
    }

  if (attr_err)
    return error_mark_node;

  return objc_build_method_signature (is_class_method, type, sel, parms,
				      ellipsis);
}

/* Parse an objc-type-name.

   objc-type-name:
     objc-type-qualifiers[opt] type-name
     objc-type-qualifiers[opt]

   objc-type-qualifiers:
     objc-type-qualifier
     objc-type-qualifiers objc-type-qualifier

   objc-type-qualifier: one of
     in out inout bycopy byref oneway  */

static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  struct c_type_name *type_name = NULL;
  tree type = NULL_TREE;
  /* Collect any leading protocol qualifiers, newest first.  */
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_KEYWORD
	  && (token->keyword == RID_IN
	      || token->keyword == RID_OUT
	      || token->keyword == RID_INOUT
	      || token->keyword == RID_BYCOPY
	      || token->keyword == RID_BYREF
	      || token->keyword == RID_ONEWAY))
	{
	  quals = chainon (build_tree_list (NULL_TREE, token->value), quals);
	  c_parser_consume_token (parser);
	}
      else
	break;
    }
  /* The type-name itself is optional.  */
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
    type_name = c_parser_type_name (parser);
  if (type_name)
    type = groktypename (type_name, NULL, NULL);

  /* If the type is unknown, and error has already been produced and
     we need to recover from the error.  In that case, use NULL_TREE
     for the type, as if no type had been specified; this will use the
     default type ('id') which is good for error recovery.  */
  if (type == error_mark_node)
    type = NULL_TREE;

  return build_tree_list (quals, type);
}

/* Parse objc-protocol-refs.

   objc-protocol-refs:
     < identifier-list >  */

static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  tree list = NULL_TREE;
  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  while (true)
    {
      tree id;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      id = c_parser_peek_token (parser)->value;
      list = chainon (list, build_tree_list (NULL_TREE, id));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return list;
}

/* Parse an objc-try-catch-finally-statement.

   objc-try-catch-finally-statement:
     @try compound-statement objc-catch-list[opt]
     @try compound-statement objc-catch-list[opt] @finally compound-statement

   objc-catch-list:
     @catch ( objc-catch-parameter-declaration ) compound-statement
     objc-catch-list @catch ( objc-catch-parameter-declaration )
       compound-statement

   objc-catch-parameter-declaration:
     parameter-declaration
     '...'

   where '...' is to be interpreted literally, that is, it means
   CPP_ELLIPSIS.

   PS: This function is identical to
   cp_parser_objc_try_catch_finally_statement for C++.  Keep them in
   sync.
*/ static void c_parser_objc_try_catch_finally_statement (c_parser *parser) { location_t location; tree stmt; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY)); c_parser_consume_token (parser); location = c_parser_peek_token (parser)->location; objc_maybe_warn_exceptions (location); stmt = c_parser_compound_statement (parser); objc_begin_try_stmt (location, stmt); while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH)) { struct c_parm *parm; tree parameter_declaration = error_mark_node; bool seen_open_paren = false; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) seen_open_paren = true; if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { /* We have "@catch (...)" (where the '...' are literally what is in the code). Skip the '...'. parameter_declaration is set to NULL_TREE, and objc_being_catch_clauses() knows that that means '...'. */ c_parser_consume_token (parser); parameter_declaration = NULL_TREE; } else { /* We have "@catch (NSException *exception)" or something like that. Parse the parameter declaration. */ parm = c_parser_parameter_declaration (parser, NULL_TREE); if (parm == NULL) parameter_declaration = error_mark_node; else parameter_declaration = grokparm (parm, NULL); } if (seen_open_paren) c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"); else { /* If there was no open parenthesis, we are recovering from an error, and we are trying to figure out what mistake the user has made. */ /* If there is an immediate closing parenthesis, the user probably forgot the opening one (ie, they typed "@catch NSException *e)". Parse the closing parenthesis and keep going. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); /* If these is no immediate closing parenthesis, the user probably doesn't know that parenthesis are required at all (ie, they typed "@catch NSException *e"). So, just forget about the closing parenthesis and keep going. 
*/ } objc_begin_catch_clause (parameter_declaration); if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) c_parser_compound_statement_nostart (parser); objc_finish_catch_clause (); } if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY)) { c_parser_consume_token (parser); location = c_parser_peek_token (parser)->location; stmt = c_parser_compound_statement (parser); objc_build_finally_clause (location, stmt); } objc_finish_try_stmt (); } /* Parse an objc-synchronized-statement. objc-synchronized-statement: @synchronized ( expression ) compound-statement */ static void c_parser_objc_synchronized_statement (c_parser *parser) { location_t loc; tree expr, stmt; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED)); c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; objc_maybe_warn_exceptions (loc); if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { struct c_expr ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (loc, ce, false, false); expr = ce.value; expr = c_fully_fold (expr, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else expr = error_mark_node; stmt = c_parser_compound_statement (parser); objc_build_synchronized (loc, expr, stmt); } /* Parse an objc-selector; return NULL_TREE without an error if the next token is not an objc-selector. objc-selector: identifier one of enum struct union if else while do for switch case default break continue return goto asm sizeof typeof __alignof unsigned long const short volatile signed restrict _Complex in out inout bycopy byref oneway int char float double void _Bool _Atomic ??? Why this selection of keywords but not, for example, storage class specifiers? 
*/

static tree
c_parser_objc_selector (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  tree value = token->value;
  /* A plain identifier is always a valid selector component.  */
  if (token->type == CPP_NAME)
    {
      c_parser_consume_token (parser);
      return value;
    }
  if (token->type != CPP_KEYWORD)
    return NULL_TREE;
  /* Certain keywords double as selector names in Objective-C.  */
  switch (token->keyword)
    {
    case RID_ENUM:
    case RID_STRUCT:
    case RID_UNION:
    case RID_IF:
    case RID_ELSE:
    case RID_WHILE:
    case RID_DO:
    case RID_FOR:
    case RID_SWITCH:
    case RID_CASE:
    case RID_DEFAULT:
    case RID_BREAK:
    case RID_CONTINUE:
    case RID_RETURN:
    case RID_GOTO:
    case RID_ASM:
    case RID_SIZEOF:
    case RID_TYPEOF:
    case RID_ALIGNOF:
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_CONST:
    case RID_SHORT:
    case RID_VOLATILE:
    case RID_SIGNED:
    case RID_RESTRICT:
    case RID_COMPLEX:
    case RID_IN:
    case RID_OUT:
    case RID_INOUT:
    case RID_BYCOPY:
    case RID_BYREF:
    case RID_ONEWAY:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
    case RID_BOOL:
    case RID_ATOMIC:
    case RID_AUTO_TYPE:
    case RID_INT_N_0:
    case RID_INT_N_1:
    case RID_INT_N_2:
    case RID_INT_N_3:
      c_parser_consume_token (parser);
      return value;
    default:
      return NULL_TREE;
    }
}

/* Parse an objc-selector-arg.

   objc-selector-arg:
     objc-selector
     objc-keywordname-list

   objc-keywordname-list:
     objc-keywordname
     objc-keywordname-list objc-keywordname

   objc-keywordname:
     objc-selector :
     :  */

static tree
c_parser_objc_selector_arg (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree list = NULL_TREE;
  /* A lone selector with no colon ends the argument.  */
  if (sel && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  while (true)
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return list;
      list = chainon (list, build_tree_list (sel, NULL_TREE));
      sel = c_parser_objc_selector (parser);
      if (!sel && c_parser_next_token_is_not (parser, CPP_COLON))
	break;
    }
  return list;
}

/* Parse an objc-receiver.
   objc-receiver:
     expression
     class-name
     type-name  */

static tree
c_parser_objc_receiver (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;

  /* A type or class name is a class reference, not an expression.  */
  if (c_parser_peek_token (parser)->type == CPP_NAME
      && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME
	  || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
    {
      tree id = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      return objc_get_class_reference (id);
    }
  struct c_expr ce = c_parser_expression (parser);
  ce = convert_lvalue_to_rvalue (loc, ce, false, false);
  return c_fully_fold (ce.value, false, NULL);
}

/* Parse objc-message-args.

   objc-message-args:
     objc-selector
     objc-keywordarg-list

   objc-keywordarg-list:
     objc-keywordarg
     objc-keywordarg-list objc-keywordarg

   objc-keywordarg:
     objc-selector : objc-keywordexpr
     : objc-keywordexpr  */

static tree
c_parser_objc_message_args (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree list = NULL_TREE;
  /* Unary message: a bare selector with no colon/arguments.  */
  if (sel && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  while (true)
    {
      tree keywordexpr;
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return error_mark_node;
      keywordexpr = c_parser_objc_keywordexpr (parser);
      list = chainon (list, build_tree_list (sel, keywordexpr));
      sel = c_parser_objc_selector (parser);
      if (!sel && c_parser_next_token_is_not (parser, CPP_COLON))
	break;
    }
  return list;
}

/* Parse an objc-keywordexpr.

   objc-keywordexpr:
     nonempty-expr-list  */

static tree
c_parser_objc_keywordexpr (c_parser *parser)
{
  tree ret;
  vec<tree, va_gc> *expr_list = c_parser_expr_list (parser, true, true,
						NULL, NULL, NULL, NULL);
  if (vec_safe_length (expr_list) == 1)
    {
      /* Just return the expression, remove a level of
	 indirection.  */
      ret = (*expr_list)[0];
    }
  else
    {
      /* We have a comma expression, we will collapse later.  */
      ret = build_tree_list_vec (expr_list);
    }
  release_tree_vector (expr_list);
  return ret;
}

/* A check, needed in several places, that ObjC interface, implementation or
   method definitions are not prefixed by incorrect items.  */

static bool
c_parser_objc_diagnose_bad_element_prefix (c_parser *parser,
					   struct c_declspecs *specs)
{
  if (!specs->declspecs_seen_p || specs->non_sc_seen_p
      || specs->typespec_kind != ctsk_none)
    {
      c_parser_error (parser,
		      "no type or storage class may be specified here,");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return true;
    }
  return false;
}

/* Parse an Objective-C @property declaration.  The syntax is:

   objc-property-declaration:
     '@property' objc-property-attributes[opt] struct-declaration ;

   objc-property-attributes:
    '(' objc-property-attribute-list ')'

   objc-property-attribute-list:
     objc-property-attribute
     objc-property-attribute-list, objc-property-attribute

   objc-property-attribute
     'getter' = identifier
     'setter' = identifier
     'readonly'
     'readwrite'
     'assign'
     'retain'
     'copy'
     'nonatomic'

  For example:
    @property NSString *name;
    @property (readonly) id object;
    @property (retain, nonatomic, getter=getTheName) id name;
    @property int a, b, c;

   PS: This function is identical to cp_parser_objc_at_propery_declaration
   for C++.  Keep them in sync.  */

static void
c_parser_objc_at_property_declaration (c_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;

  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;

  loc = c_parser_peek_token (parser)->location;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY));

  c_parser_consume_token (parser);  /* Eat '@property'.  */

  /* Parse the optional attribute list...  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* Eat the '(' */
      c_parser_consume_token (parser);

      /* Property attribute keywords are valid now.  */
      parser->objc_property_attr_context = true;

      while (true)
	{
	  bool syntax_error = false;
	  c_token *token = c_parser_peek_token (parser);
	  enum rid keyword;

	  if (token->type != CPP_KEYWORD)
	    {
	      if (token->type == CPP_CLOSE_PAREN)
		c_parser_error (parser, "expected identifier");
	      else
		{
		  c_parser_consume_token (parser);
		  c_parser_error (parser, "unknown property attribute");
		}
	      break;
	    }
	  keyword = token->keyword;
	  c_parser_consume_token (parser);
	  switch (keyword)
	    {
	    case RID_ASSIGN:    property_assign = true;    break;
	    case RID_COPY:      property_copy = true;      break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY:  property_readonly = true;  break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN:    property_retain = true;    break;

	    case RID_GETTER:
	    case RID_SETTER:
	      /* Both take "= identifier"; setter additionally requires
		 a trailing ':' in the name.  */
	      if (c_parser_next_token_is_not (parser, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    c_parser_error (parser,
				    "missing %<=%> (after %<getter%> attribute)");
		  else
		    c_parser_error (parser,
				    "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      c_parser_consume_token (parser); /* eat the = */
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  if (property_setter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<setter%> attribute may only be specified once");
		  else
		    property_setter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		  if (c_parser_next_token_is_not (parser, CPP_COLON))
		    c_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    c_parser_consume_token (parser);
		}
	      else
		{
		  if (property_getter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<getter%> attribute may only be specified once");
		  else
		    property_getter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		}
	      break;
	    default:
	      c_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }

	  if (syntax_error)
	    break;

	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      parser->objc_property_attr_context = false;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  /* ... and the property declaration(s).  */
  properties = c_parser_struct_declaration (parser);

  if (properties == error_mark_node)
    {
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      parser->error = false;
      return;
    }

  if (properties == NULL_TREE)
    c_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in
	 reverse order; add them one by one.  */
      properties = nreverse (properties);

      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident,
				       property_setter_ident);
    }

  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  parser->error = false;
}

/* Parse an Objective-C @synthesize declaration.
The syntax is:

   objc-synthesize-declaration:
     @synthesize objc-synthesize-identifier-list ;

   objc-synthesize-identifier-list:
     objc-synthesize-identifier
     objc-synthesize-identifier-list, objc-synthesize-identifier

   objc-synthesize-identifier
     identifier
     identifier = identifier

  For example:
    @synthesize MyProperty;
    @synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;

  PS: This function is identical to cp_parser_objc_at_synthesize_declaration
  for C++.  Keep them in sync.  */
static void
c_parser_objc_at_synthesize_declaration (c_parser *parser)
{
  /* Accumulates one TREE_LIST node per synthesized property:
     TREE_PURPOSE is the optional ivar name, TREE_VALUE the property
     name.  Handed to objc_add_synthesize_declaration at the end.  */
  tree list = NULL_TREE;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNTHESIZE));
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  while (true)
    {
      tree property, ivar;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Once we find the semicolon, we can resume normal parsing.
	     We have to reset parser->error manually because
	     c_parser_skip_until_found() won't reset it for us if the
	     next token is precisely a semicolon.  */
	  parser->error = false;
	  return;
	}
      property = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      /* Optional "= identifier" names the backing instance variable.  */
      if (c_parser_next_token_is (parser, CPP_EQ))
	{
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	      parser->error = false;
	      return;
	    }
	  ivar = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      else
	ivar = NULL_TREE;
      list = chainon (list, build_tree_list (ivar, property));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_add_synthesize_declaration (loc, list);
}

/* Parse an Objective-C @dynamic declaration.
The syntax is: objc-dynamic-declaration: @dynamic identifier-list ; For example: @dynamic MyProperty; @dynamic MyProperty, AnotherProperty; PS: This function is identical to cp_parser_objc_at_dynamic_declaration for C++. Keep them in sync. */ static void c_parser_objc_at_dynamic_declaration (c_parser *parser) { tree list = NULL_TREE; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_DYNAMIC)); loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); while (true) { tree property; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); parser->error = false; return; } property = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, property)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_add_dynamic_declaration (loc, list); } /* Handle pragmas. Some OpenMP pragmas are associated with, and therefore should be considered, statements. ALLOW_STMT is true if we're within the context of a function and such pragmas are to be allowed. Returns true if we actually parsed such a pragma. 
*/

static bool
c_parser_pragma (c_parser *parser, enum pragma_context context)
{
  unsigned int id;

  id = c_parser_peek_token (parser)->pragma_kind;
  gcc_assert (id != PRAGMA_NONE);

  switch (id)
    {
    case PRAGMA_OACC_ENTER_DATA:
      c_parser_oacc_enter_exit_data (parser, true);
      return false;

    case PRAGMA_OACC_EXIT_DATA:
      c_parser_oacc_enter_exit_data (parser, false);
      return false;

    /* The following stand-alone directives are only valid directly
       inside a compound statement; anywhere else they fall through to
       the shared bad_stmt diagnostic in the default case.  */
    case PRAGMA_OACC_UPDATE:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma acc update%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_oacc_update (parser);
      return false;

    case PRAGMA_OMP_BARRIER:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp barrier%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_barrier (parser);
      return false;

    case PRAGMA_OMP_FLUSH:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp flush%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_flush (parser);
      return false;

    case PRAGMA_OMP_TASKWAIT:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp taskwait%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_taskwait (parser);
      return false;

    case PRAGMA_OMP_TASKYIELD:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp taskyield%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_taskyield (parser);
      return false;

    case PRAGMA_OMP_CANCEL:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp cancel%> may only be "
			    "used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_cancel (parser);
      return false;

    case PRAGMA_OMP_CANCELLATION_POINT:
      if (context != pragma_compound)
	{
	  if (context == pragma_stmt)
	    c_parser_error (parser, "%<#pragma omp cancellation point%> may "
			    "only be used in compound statements");
	  goto bad_stmt;
	}
      c_parser_omp_cancellation_point (parser);
      return false;

    case PRAGMA_OMP_THREADPRIVATE:
      c_parser_omp_threadprivate (parser);
      return false;

    case PRAGMA_OMP_TARGET:
      return c_parser_omp_target (parser, context);

    case PRAGMA_OMP_END_DECLARE_TARGET:
      c_parser_omp_end_declare_target (parser);
      return false;

    case PRAGMA_OMP_SECTION:
      /* Bare "omp section" is never dispatched here legitimately; the
	 sections construct consumes it itself.  */
      error_at (c_parser_peek_token (parser)->location,
		"%<#pragma omp section%> may only be used in "
		"%<#pragma omp sections%> construct");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;

    case PRAGMA_OMP_DECLARE_REDUCTION:
      c_parser_omp_declare (parser, context);
      return false;

    case PRAGMA_IVDEP:
      c_parser_consume_pragma (parser);
      c_parser_skip_to_pragma_eol (parser);
      /* #pragma GCC ivdep must immediately precede a loop statement.  */
      if (!c_parser_next_token_is_keyword (parser, RID_FOR)
	  && !c_parser_next_token_is_keyword (parser, RID_WHILE)
	  && !c_parser_next_token_is_keyword (parser, RID_DO))
	{
	  c_parser_error (parser, "for, while or do statement expected");
	  return false;
	}
      /* The boolean argument marks the loop as ivdep — presumably;
	 confirm against the statement parsers' signatures.  */
      if (c_parser_next_token_is_keyword (parser, RID_FOR))
	c_parser_for_statement (parser, true);
      else if (c_parser_next_token_is_keyword (parser, RID_WHILE))
	c_parser_while_statement (parser, true);
      else
	c_parser_do_statement (parser, true);
      return false;

    case PRAGMA_GCC_PCH_PREPROCESS:
      c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first");
      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
      return false;

    case PRAGMA_CILK_SIMD:
      if (!c_parser_cilk_verify_simd (parser, context))
	return false;
      c_parser_consume_pragma (parser);
      c_parser_cilk_simd (parser);
      return false;

    case PRAGMA_CILK_GRAINSIZE:
      if (!flag_cilkplus)
	{
	  warning (0, "%<#pragma grainsize%> ignored because -fcilkplus is not"
		   " enabled");
	  c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	  return false;
	}
      if (context == pragma_external)
	{
	  error_at (c_parser_peek_token (parser)->location,
		    "%<#pragma grainsize%> must be inside a function");
	  c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	  return false;
	}
      c_parser_cilk_grainsize (parser);
      return false;

    default:
      if (id < PRAGMA_FIRST_EXTERNAL)
	{
	  if (context != pragma_stmt && context != pragma_compound)
	    {
	    bad_stmt:
	      c_parser_error (parser, "expected declaration specifiers");
	      c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
	      return false;
	    }
	  c_parser_omp_construct (parser);
	  return true;
	}
      break;
    }

  /* An externally registered pragma: hand it to its handler.  */
  c_parser_consume_pragma (parser);
  c_invoke_pragma_handler (id);

  /* Skip to EOL, but suppress any error message.  Those will have been
     generated by the handler routine through calling error, as opposed
     to calling c_parser_error.  */
  parser->error = true;
  c_parser_skip_to_pragma_eol (parser);

  return false;
}

/* The interface the pragma parsers have to the lexer.  */

enum cpp_ttype
pragma_lex (tree *value)
{
  c_token *tok = c_parser_peek_token (the_parser);
  enum cpp_ttype ret = tok->type;

  *value = tok->value;
  if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF)
    ret = CPP_EOF;
  else
    {
      /* Keywords are reported to pragma handlers as plain names.  */
      if (ret == CPP_KEYWORD)
	ret = CPP_NAME;
      c_parser_consume_token (the_parser);
    }

  return ret;
}

/* Parse the string argument of #pragma GCC pch_preprocess and hand it
   to c_common_pch_pragma; on a malformed argument, diagnose and skip
   the rest of the pragma line.  */

static void
c_parser_pragma_pch_preprocess (c_parser *parser)
{
  tree name = NULL;

  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_STRING))
    {
      name = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    c_parser_error (parser, "expected string literal");
  c_parser_skip_to_pragma_eol (parser);

  if (name)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));
}

/* OpenACC and OpenMP parsing routines.  */

/* Returns name of the next clause.
   If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
   the token is not consumed.  Otherwise appropriate pragma_omp_clause is
   returned and the token is consumed.
*/

static pragma_omp_clause
c_parser_omp_clause_name (c_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* A few clause names collide with C keywords and so arrive as
     keyword tokens rather than CPP_NAME.  */
  if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    result = PRAGMA_OACC_CLAUSE_AUTO;
  else if (c_parser_next_token_is_keyword (parser, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (c_parser_next_token_is_keyword (parser, RID_FOR))
    result = PRAGMA_OMP_CLAUSE_FOR;
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      /* Dispatch on the first character, then strcmp within the
	 bucket.  */
      switch (p[0])
	{
	case 'a':
	  if (!strcmp ("aligned", p))
	    result = PRAGMA_OMP_CLAUSE_ALIGNED;
	  else if (!strcmp ("async", p))
	    result = PRAGMA_OACC_CLAUSE_ASYNC;
	  break;
	case 'c':
	  if (!strcmp ("collapse", p))
	    result = PRAGMA_OMP_CLAUSE_COLLAPSE;
	  else if (!strcmp ("copy", p))
	    result = PRAGMA_OACC_CLAUSE_COPY;
	  else if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyout", p))
	    result = PRAGMA_OACC_CLAUSE_COPYOUT;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  else if (!strcmp ("create", p))
	    result = PRAGMA_OACC_CLAUSE_CREATE;
	  break;
	case 'd':
	  if (!strcmp ("delete", p))
	    result = PRAGMA_OACC_CLAUSE_DELETE;
	  else if (!strcmp ("depend", p))
	    result = PRAGMA_OMP_CLAUSE_DEPEND;
	  else if (!strcmp ("device", p))
	    result = PRAGMA_OMP_CLAUSE_DEVICE;
	  else if (!strcmp ("deviceptr", p))
	    result = PRAGMA_OACC_CLAUSE_DEVICEPTR;
	  else if (!strcmp ("dist_schedule", p))
	    result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE;
	  break;
	case 'f':
	  if (!strcmp ("final", p))
	    result = PRAGMA_OMP_CLAUSE_FINAL;
	  else if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  else if (!strcmp ("from", p))
	    result = PRAGMA_OMP_CLAUSE_FROM;
	  break;
	case 'g':
	  if (!strcmp ("gang", p))
	    result = PRAGMA_OACC_CLAUSE_GANG;
	  break;
	case 'h':
	  if (!strcmp ("host", p))
	    result = PRAGMA_OACC_CLAUSE_HOST;
	  break;
	case 'i':
	  if (!strcmp ("inbranch", p))
	    result = PRAGMA_OMP_CLAUSE_INBRANCH;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  else if (!strcmp ("linear", p))
	    result = PRAGMA_OMP_CLAUSE_LINEAR;
	  break;
	case 'm':
	  if (!strcmp ("map", p))
	    result = PRAGMA_OMP_CLAUSE_MAP;
	  else if (!strcmp ("mergeable", p))
	    result = PRAGMA_OMP_CLAUSE_MERGEABLE;
	  /* Cilk Plus-only clause names are recognized only when the
	     flag is on.  */
	  else if (flag_cilkplus && !strcmp ("mask", p))
	    result = PRAGMA_CILK_CLAUSE_MASK;
	  break;
	case 'n':
	  if (!strcmp ("notinbranch", p))
	    result = PRAGMA_OMP_CLAUSE_NOTINBRANCH;
	  else if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_gangs", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_GANGS;
	  else if (!strcmp ("num_teams", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_TEAMS;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  else if (!strcmp ("num_workers", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_WORKERS;
	  else if (flag_cilkplus && !strcmp ("nomask", p))
	    result = PRAGMA_CILK_CLAUSE_NOMASK;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'p':
	  if (!strcmp ("parallel", p))
	    result = PRAGMA_OMP_CLAUSE_PARALLEL;
	  else if (!strcmp ("present", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT;
	  /* OpenACC "pxxx" names are shorthands for "present_or_xxx".  */
	  else if (!strcmp ("present_or_copy", p)
		   || !strcmp ("pcopy", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY;
	  else if (!strcmp ("present_or_copyin", p)
		   || !strcmp ("pcopyin", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN;
	  else if (!strcmp ("present_or_copyout", p)
		   || !strcmp ("pcopyout", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT;
	  else if (!strcmp ("present_or_create", p)
		   || !strcmp ("pcreate", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE;
	  else if (!strcmp ("private", p))
	    result = PRAGMA_OMP_CLAUSE_PRIVATE;
	  else if (!strcmp ("proc_bind", p))
	    result = PRAGMA_OMP_CLAUSE_PROC_BIND;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("safelen", p))
	    result = PRAGMA_OMP_CLAUSE_SAFELEN;
	  else if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("sections", p))
	    result = PRAGMA_OMP_CLAUSE_SECTIONS;
	  else if (!strcmp ("seq", p))
	    result = PRAGMA_OACC_CLAUSE_SEQ;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  else if (!strcmp ("simdlen", p))
	    result = PRAGMA_OMP_CLAUSE_SIMDLEN;
	  else if (!strcmp ("self", p))
	    result = PRAGMA_OACC_CLAUSE_SELF;
	  break;
	case 't':
	  if (!strcmp ("taskgroup", p))
	    result = PRAGMA_OMP_CLAUSE_TASKGROUP;
	  else if (!strcmp ("thread_limit", p))
	    result = PRAGMA_OMP_CLAUSE_THREAD_LIMIT;
	  else if (!strcmp ("to", p))
	    result = PRAGMA_OMP_CLAUSE_TO;
	  break;
	case 'u':
	  if (!strcmp ("uniform", p))
	    result = PRAGMA_OMP_CLAUSE_UNIFORM;
	  else if (!strcmp ("untied", p))
	    result = PRAGMA_OMP_CLAUSE_UNTIED;
	  break;
	case 'v':
	  if (!strcmp ("vector", p))
	    result = PRAGMA_OACC_CLAUSE_VECTOR;
	  else if (!strcmp ("vector_length", p))
	    result = PRAGMA_OACC_CLAUSE_VECTOR_LENGTH;
	  else if (flag_cilkplus && !strcmp ("vectorlength", p))
	    result = PRAGMA_CILK_CLAUSE_VECTORLENGTH;
	  break;
	case 'w':
	  if (!strcmp ("wait", p))
	    result = PRAGMA_OACC_CLAUSE_WAIT;
	  else if (!strcmp ("worker", p))
	    result = PRAGMA_OACC_CLAUSE_WORKER;
	  break;
	}
    }

  if (result != PRAGMA_OMP_CLAUSE_NONE)
    c_parser_consume_token (parser);

  return result;
}

/* Validate that a clause of the given type does not already exist.  */

static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == code)
      {
	location_t loc = OMP_CLAUSE_LOCATION (c);
	error_at (loc, "too many %qs clauses", name);
	break;
      }
}

/* OpenACC 2.0
   Parse wait clause or wait directive parameters.
*/

static tree
c_parser_oacc_wait_list (c_parser *parser, location_t clause_loc, tree list)
{
  vec<tree, va_gc> *args;
  tree t, args_tree;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  args = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL);

  if (args->length () == 0)
    {
      c_parser_error (parser, "expected integer expression before ')'");
      release_tree_vector (args);
      return list;
    }

  args_tree = build_tree_list_vec (args);

  /* Each integral argument becomes its own OMP_CLAUSE_WAIT node pushed
     on the front of LIST; non-integral arguments are diagnosed and
     dropped.  */
  for (t = args_tree; t; t = TREE_CHAIN (t))
    {
      tree targ = TREE_VALUE (t);

      if (targ != error_mark_node)
	{
	  if (!INTEGRAL_TYPE_P (TREE_TYPE (targ)))
	    {
	      c_parser_error (parser, "expression must be integral");
	      targ = error_mark_node;
	    }
	  else
	    {
	      tree c = build_omp_clause (clause_loc, OMP_CLAUSE_WAIT);

	      OMP_CLAUSE_DECL (c) = targ;
	      OMP_CLAUSE_CHAIN (c) = list;
	      list = c;
	    }
	}
    }

  release_tree_vector (args);

  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return list;
}

/* OpenACC 2.0, OpenMP 2.5:
   variable-list:
     identifier
     variable-list , identifier

   If KIND is nonzero, create the appropriate node and install the decl
   in OMP_CLAUSE_DECL and add the node to the head of the list.  If KIND
   is nonzero, CLAUSE_LOC is the location of the clause.

   If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
   return the list created.
*/

static tree
c_parser_omp_variable_list (c_parser *parser,
			    location_t clause_loc,
			    enum omp_clause_code kind, tree list)
{
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    c_parser_error (parser, "expected identifier");

  while (c_parser_next_token_is (parser, CPP_NAME)
	 && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);

      if (t == NULL_TREE)
	{
	  undeclared_variable (c_parser_peek_token (parser)->location,
			       c_parser_peek_token (parser)->value);
	  t = error_mark_node;
	}

      c_parser_consume_token (parser);

      if (t == error_mark_node)
	;
      else if (kind != 0)
	{
	  switch (kind)
	    {
	    case OMP_CLAUSE__CACHE_:
	      /* The cache directive requires at least one array
		 section on every variable.  */
	      if (c_parser_peek_token (parser)->type != CPP_OPEN_SQUARE)
		{
		  c_parser_error (parser, "expected %<[%>");
		  t = error_mark_node;
		  break;
		}
	      /* FALL THROUGH.  */
	    case OMP_CLAUSE_MAP:
	    case OMP_CLAUSE_FROM:
	    case OMP_CLAUSE_TO:
	    case OMP_CLAUSE_DEPEND:
	      /* Parse any number of "[low-bound : length]" array
		 sections; each is consed onto T.  */
	      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
		{
		  tree low_bound = NULL_TREE, length = NULL_TREE;

		  c_parser_consume_token (parser);
		  if (!c_parser_next_token_is (parser, CPP_COLON))
		    {
		      low_bound = c_parser_expression (parser).value;
		      mark_exp_read (low_bound);
		    }
		  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		    length = integer_one_node;
		  else
		    {
		      /* Look for `:'.  */
		      if (!c_parser_require (parser, CPP_COLON,
					     "expected %<:%>"))
			{
			  t = error_mark_node;
			  break;
			}
		      if (!c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
			{
			  length = c_parser_expression (parser).value;
			  mark_exp_read (length);
			}
		    }
		  /* Look for the closing `]'.  */
		  if (!c_parser_require (parser, CPP_CLOSE_SQUARE,
					 "expected %<]%>"))
		    {
		      t = error_mark_node;
		      break;
		    }
		  if (kind == OMP_CLAUSE__CACHE_)
		    {
		      /* NOTE(review): low_bound/length can still be
			 NULL_TREE here when the section omits them
			 (e.g. "[:]"); TREE_CODE on NULL_TREE would
			 crash — confirm whether earlier parsing rules
			 this out.  */
		      if (TREE_CODE (low_bound) != INTEGER_CST
			  && !TREE_READONLY (low_bound))
			{
			  error_at (clause_loc,
				    "%qD is not a constant", low_bound);
			  t = error_mark_node;
			}

		      if (TREE_CODE (length) != INTEGER_CST
			  && !TREE_READONLY (length))
			{
			  error_at (clause_loc,
				    "%qD is not a constant", length);
			  t = error_mark_node;
			}
		    }

		  t = tree_cons (low_bound, length, t);
		}
	      break;
	    default:
	      break;
	    }

	  if (t != error_mark_node)
	    {
	      tree u = build_omp_clause (clause_loc, kind);
	      OMP_CLAUSE_DECL (u) = t;
	      OMP_CLAUSE_CHAIN (u) = list;
	      list = u;
	    }
	}
      else
	list = tree_cons (t, NULL_TREE, list);

      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;

      c_parser_consume_token (parser);
    }

  return list;
}

/* Similarly, but expect leading and trailing parenthesis.  This is a very
   common case for OpenACC and OpenMP clauses.  */

static tree
c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind,
			      tree list)
{
  /* The clauses location.  */
  location_t loc = c_parser_peek_token (parser)->location;

  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      list = c_parser_omp_variable_list (parser, loc, kind, list);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}

/* OpenACC 2.0:
   copy ( variable-list )
   copyin ( variable-list )
   copyout ( variable-list )
   create ( variable-list )
   delete ( variable-list )
   present ( variable-list )
   present_or_copy ( variable-list )
     pcopy ( variable-list )
   present_or_copyin ( variable-list )
     pcopyin ( variable-list )
   present_or_copyout ( variable-list )
     pcopyout ( variable-list )
   present_or_create ( variable-list )
     pcreate ( variable-list ) */

static tree
c_parser_oacc_data_clause (c_parser *parser, pragma_omp_clause c_kind,
			   tree list)
{
  /* Translate the clause keyword into the map kind applied to every
     variable in the list.  */
  enum gomp_map_kind kind;
  switch (c_kind)
    {
    case PRAGMA_OACC_CLAUSE_COPY:
      kind = GOMP_MAP_FORCE_TOFROM;
      break;
    case PRAGMA_OACC_CLAUSE_COPYIN:
      kind = GOMP_MAP_FORCE_TO;
      break;
    case PRAGMA_OACC_CLAUSE_COPYOUT:
      kind = GOMP_MAP_FORCE_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_CREATE:
      kind = GOMP_MAP_FORCE_ALLOC;
      break;
    case PRAGMA_OACC_CLAUSE_DELETE:
      kind = GOMP_MAP_FORCE_DEALLOC;
      break;
    case PRAGMA_OACC_CLAUSE_DEVICE:
      kind = GOMP_MAP_FORCE_TO;
      break;
    case PRAGMA_OACC_CLAUSE_HOST:
    case PRAGMA_OACC_CLAUSE_SELF:
      kind = GOMP_MAP_FORCE_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT:
      kind = GOMP_MAP_FORCE_PRESENT;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY:
      kind = GOMP_MAP_TOFROM;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN:
      kind = GOMP_MAP_TO;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT:
      kind = GOMP_MAP_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE:
      kind = GOMP_MAP_ALLOC;
      break;
    default:
      gcc_unreachable ();
    }
  tree nl, c;
  nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_MAP, list);

  /* The newly-added clauses are exactly those between NL and the old
     head LIST.  */
  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, kind);

  return nl;
}

/* OpenACC 2.0:
   deviceptr ( variable-list ) */

static tree
c_parser_oacc_data_clause_deviceptr (c_parser
*parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree vars, t;

  /* Can't use OMP_CLAUSE_MAP here (that is, can't use the generic
     c_parser_oacc_data_clause), as for PRAGMA_OACC_CLAUSE_DEVICEPTR,
     variable-list must only allow for pointer variables.  */
  vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
  /* Fixed: the loop condition was the redundant "t && t".  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);

      /* FIXME diagnostics: Ideally we should keep individual
	 locations for all the variables in the var list to make the
	 following errors more precise.  Perhaps
	 c_parser_omp_var_list_parens() should construct a list of
	 locations to go along with the var list.  */

      if (TREE_CODE (v) != VAR_DECL)
	error_at (loc, "%qD is not a variable", v);
      else if (TREE_TYPE (v) == error_mark_node)
	;
      else if (!POINTER_TYPE_P (TREE_TYPE (v)))
	error_at (loc, "%qD is not a pointer variable", v);

      tree u = build_omp_clause (loc, OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (u, GOMP_MAP_FORCE_DEVICEPTR);
      OMP_CLAUSE_DECL (u) = v;
      OMP_CLAUSE_CHAIN (u) = list;
      list = u;
    }

  return list;
}

/* OpenACC 2.0, OpenMP 3.0:
   collapse ( constant-expression ) */

static tree
c_parser_omp_clause_collapse (c_parser *parser, tree list)
{
  tree c, num = error_mark_node;
  HOST_WIDE_INT n;
  location_t loc;

  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");

  loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      num = c_parser_expr_no_commas (parser, NULL).value;
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  if (num == error_mark_node)
    return list;
  mark_exp_read (num);
  num = c_fully_fold (num, false, NULL);
  /* The argument must be a positive constant that also fits in an
     int ((int) n != n rejects values truncated by the narrowing).  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
      || !tree_fits_shwi_p (num)
      || (n = tree_to_shwi (num)) <= 0
      || (int) n != n)
    {
      error_at (loc,
		"collapse argument needs positive constant integer expression");
      return list;
    }
  c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 2.5:
   copyin ( variable-list ) */

static tree
c_parser_omp_clause_copyin (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list);
}

/* OpenMP 2.5:
   copyprivate ( variable-list ) */

static tree
c_parser_omp_clause_copyprivate (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list);
}

/* OpenMP 2.5:
   default ( shared | none ) */

static tree
c_parser_omp_clause_default (c_parser *parser, tree list)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  location_t loc = c_parser_peek_token (parser)->location;
  tree c;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;

	case 's':
	  if (strcmp ("shared", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;

	default:
	  goto invalid_kind;
	}

      c_parser_consume_token (parser);
    }
  else
    {
    invalid_kind:
      c_parser_error (parser, "expected %<none%> or %<shared%>");
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");
  c = build_omp_clause (loc, OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}

/* OpenMP 2.5:
   firstprivate ( variable-list ) */

static tree
c_parser_omp_clause_firstprivate (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list);
}

/* OpenMP 3.1:
   final ( expression ) */

static tree
c_parser_omp_clause_final (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree t = c_parser_paren_condition (parser);
      tree c;

      check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final");

      c = build_omp_clause (loc, OMP_CLAUSE_FINAL);
      OMP_CLAUSE_FINAL_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  else
    c_parser_error (parser, "expected %<(%>");

  return list;
}

/* OpenACC, OpenMP 2.5:
   if ( expression ) */

static tree
c_parser_omp_clause_if (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree t = c_parser_paren_condition (parser);
      tree c;

      check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if");

      c = build_omp_clause (loc, OMP_CLAUSE_IF);
      OMP_CLAUSE_IF_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  else
    c_parser_error (parser, "expected %<(%>");

  return list;
}

/* OpenMP 2.5:
   lastprivate ( variable-list ) */

static tree
c_parser_omp_clause_lastprivate (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE, list);
}

/* OpenMP 3.1:
   mergeable */

static tree
c_parser_omp_clause_mergeable (c_parser *parser, tree list)
{
  /* Fixed: PARSER was marked ATTRIBUTE_UNUSED although the body reads
     c_parser_peek_token (parser); the attribute was misleading.  */
  tree c;

  /* FIXME: Should we allow duplicates?
*/
  check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable");

  c = build_omp_clause (c_parser_peek_token (parser)->location,
			OMP_CLAUSE_MERGEABLE);
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 2.5:
   nowait */

static tree
c_parser_omp_clause_nowait (c_parser *parser, tree list)
{
  /* Fixed: PARSER was marked ATTRIBUTE_UNUSED although the first
     statement below reads c_parser_peek_token (parser); the attribute
     was misleading.  */
  tree c;
  location_t loc = c_parser_peek_token (parser)->location;

  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");

  c = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenACC:
   num_gangs ( expression ) */

static tree
c_parser_omp_clause_num_gangs (c_parser *parser, tree list)
{
  location_t num_gangs_loc = c_parser_peek_token (parser)->location;

  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      tree c, t = c_parser_expression (parser).value;
      mark_exp_read (t);
      t = c_fully_fold (t, false, NULL);

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      if (CAN_HAVE_LOCATION_P (c))
	SET_EXPR_LOCATION (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0,
		      "%<num_gangs%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_GANGS, "num_gangs");

      c = build_omp_clause (num_gangs_loc, OMP_CLAUSE_NUM_GANGS);
      OMP_CLAUSE_NUM_GANGS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenMP 2.5:
   num_threads ( expression ) */

static tree
c_parser_omp_clause_num_threads (c_parser *parser, tree list)
{
  location_t num_threads_loc = c_parser_peek_token (parser)->location;

  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      tree c, t = c_parser_expression (parser).value;
      mark_exp_read (t);
      t = c_fully_fold (t, false, NULL);

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      if (CAN_HAVE_LOCATION_P (c))
	SET_EXPR_LOCATION (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0,
		      "%<num_threads%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");

      c = build_omp_clause (num_threads_loc, OMP_CLAUSE_NUM_THREADS);
      OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenACC:
   num_workers ( expression ) */

static tree
c_parser_omp_clause_num_workers (c_parser *parser, tree list)
{
  location_t num_workers_loc = c_parser_peek_token (parser)->location;

  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      tree c, t = c_parser_expression (parser).value;
      mark_exp_read (t);
      t = c_fully_fold (t, false, NULL);

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      if (CAN_HAVE_LOCATION_P (c))
	SET_EXPR_LOCATION (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0,
		      "%<num_workers%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_WORKERS, "num_workers");

      c = build_omp_clause (num_workers_loc, OMP_CLAUSE_NUM_WORKERS);
      OMP_CLAUSE_NUM_WORKERS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenACC:
   async [( int-expr )] */

static tree
c_parser_oacc_clause_async (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;

  /* Without an argument, async defaults to GOMP_ASYNC_NOVAL.  */
  t = build_int_cst (integer_type_node, GOMP_ASYNC_NOVAL);

  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    {
      c_parser_consume_token (parser);

      t = c_parser_expression (parser).value;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	c_parser_error (parser, "expected integer expression");
      else if (t == error_mark_node
	       || !c_parser_require (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>"))
	return list;
    }
  else
    t = c_fully_fold (t, false, NULL);

  check_no_duplicate_clause (list, OMP_CLAUSE_ASYNC, "async");

  c = build_omp_clause (loc, OMP_CLAUSE_ASYNC);
  OMP_CLAUSE_ASYNC_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  list = c;

  return list;
}

/* OpenACC:
   wait ( int-expr-list ) */

static tree
c_parser_oacc_clause_wait (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;

  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    list = c_parser_oacc_wait_list (parser, clause_loc, list);

  return list;
}

/* OpenMP 2.5:
   ordered */

static tree
c_parser_omp_clause_ordered (c_parser *parser, tree list)
{
  tree c;

  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");

  c = build_omp_clause (c_parser_peek_token (parser)->location,
			OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 2.5:
   private ( variable-list ) */

static tree
c_parser_omp_clause_private
 (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
}

/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )

   reduction-operator:
     One of: + * - & ^ | && ||

   OpenMP 3.1:

   reduction-operator:
     One of: + * - & ^ | && || max min

   OpenMP 4.0:

   reduction-operator:
     One of: + * - & ^ | && ||
     identifier  */

static tree
c_parser_omp_clause_reduction (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      enum tree_code code = ERROR_MARK;
      tree reduc_id = NULL_TREE;

      /* Map the operator token to a tree code; an identifier other
	 than min/max names a user-declared (OpenMP 4.0) reduction and
	 leaves CODE as ERROR_MARK.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	  code = PLUS_EXPR;
	  break;
	case CPP_MULT:
	  code = MULT_EXPR;
	  break;
	case CPP_MINUS:
	  code = MINUS_EXPR;
	  break;
	case CPP_AND:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  code = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  code = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  code = TRUTH_ORIF_EXPR;
	  break;
	case CPP_NAME:
	  {
	    const char *p
	      = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	    if (strcmp (p, "min") == 0)
	      {
		code = MIN_EXPR;
		break;
	      }
	    if (strcmp (p, "max") == 0)
	      {
		code = MAX_EXPR;
		break;
	      }
	    reduc_id = c_parser_peek_token (parser)->value;
	    break;
	  }
	default:
	  c_parser_error (parser,
			  "expected %<+%>, %<*%>, %<-%>, %<&%>, "
			  "%<^%>, %<|%>, %<&&%>, %<||%>, %<min%> or %<max%>");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
	  return list;
	}
      c_parser_consume_token (parser);
      reduc_id = c_omp_reduction_id (code, reduc_id);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	{
	  tree nl, c;

	  nl = c_parser_omp_variable_list (parser, clause_loc,
					   OMP_CLAUSE_REDUCTION, list);
	  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	    {
	      tree type = TREE_TYPE (OMP_CLAUSE_DECL (c));
	      OMP_CLAUSE_REDUCTION_CODE (c) = code;

	      /* Non-arithmetic types (and user-declared operators)
		 need a user-defined reduction looked up by name.  */
	      if (code == ERROR_MARK
		  || !(INTEGRAL_TYPE_P (type)
		       || TREE_CODE (type) == REAL_TYPE
		       || TREE_CODE (type) == COMPLEX_TYPE))
		OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		  = c_omp_reduction_lookup (reduc_id,
					    TYPE_MAIN_VARIANT (type));
	    }

	  list = nl;
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}

/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )

   schedule-kind:
     static | dynamic | guided | runtime | auto  */

static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  c = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);

      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;

        case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;

	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;

	default:
	  goto invalid_kind;
	}
    }
  /* "static" and "auto" are C keywords, so they arrive as keyword
     tokens rather than CPP_NAME.  */
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;

  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      location_t here;
      c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      t = c_parser_expr_no_commas (parser, NULL).value;
      mark_exp_read (t);
      t = c_fully_fold (t, false, NULL);

      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error_at (here, "schedule %<runtime%> does not take "
		  "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
	error_at (here,
		  "schedule %<auto%> does not take "
		  "a %<chunk_size%> parameter");
      else if
(TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE) OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; else c_parser_error (parser, "expected integer expression"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<,%> or %<)%>"); check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule"); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: c_parser_error (parser, "invalid schedule kind"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0); return list; } /* OpenMP 2.5: shared ( variable-list ) */ static tree c_parser_omp_clause_shared (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list); } /* OpenMP 3.0: untied */ static tree c_parser_omp_clause_untied (c_parser *parser ATTRIBUTE_UNUSED, tree list) { tree c; /* FIXME: Should we allow duplicates? */ check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied"); c = build_omp_clause (c_parser_peek_token (parser)->location, OMP_CLAUSE_UNTIED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenACC: vector_length ( expression ) */ static tree c_parser_omp_clause_vector_length (c_parser *parser, tree list) { location_t vector_length_loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { location_t expr_loc = c_parser_peek_token (parser)->location; tree c, t = c_parser_expression (parser).value; mark_exp_read (t); t = c_fully_fold (t, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } /* Attempt to statically determine when the number isn't positive. 
         */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
                           build_int_cst (TREE_TYPE (t), 0));
      if (CAN_HAVE_LOCATION_P (c))
        SET_EXPR_LOCATION (c, expr_loc);
      /* Folding produced boolean_true_node only if t <= 0 is provable.  */
      if (c == boolean_true_node)
        {
          warning_at (expr_loc, 0,
                      "%<vector_length%> value must be positive");
          t = integer_one_node;
        }

      check_no_duplicate_clause (list, OMP_CLAUSE_VECTOR_LENGTH,
                                 "vector_length");

      c = build_omp_clause (vector_length_loc, OMP_CLAUSE_VECTOR_LENGTH);
      OMP_CLAUSE_VECTOR_LENGTH_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenMP 4.0:
   inbranch
   notinbranch  */

static tree
c_parser_omp_clause_branch (c_parser *parser ATTRIBUTE_UNUSED,
                            enum omp_clause_code code, tree list)
{
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);

  tree c = build_omp_clause (c_parser_peek_token (parser)->location, code);
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 4.0:
   parallel
   for
   sections
   taskgroup  */

static tree
c_parser_omp_clause_cancelkind (c_parser *parser ATTRIBUTE_UNUSED,
                                enum omp_clause_code code, tree list)
{
  /* Unlike c_parser_omp_clause_branch, no duplicate check here; the
     caller enforces that cancel kinds appear first.  */
  tree c = build_omp_clause (c_parser_peek_token (parser)->location, code);
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 4.0:
   num_teams ( expression )  */

static tree
c_parser_omp_clause_num_teams (c_parser *parser, tree list)
{
  location_t num_teams_loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      tree c, t = c_parser_expression (parser).value;
      mark_exp_read (t);
      t = c_fully_fold (t, false, NULL);

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
        {
          c_parser_error (parser, "expected integer expression");
          return list;
        }

      /* Attempt to statically determine when the number isn't positive.
         */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
                           build_int_cst (TREE_TYPE (t), 0));
      if (CAN_HAVE_LOCATION_P (c))
        SET_EXPR_LOCATION (c, expr_loc);
      if (c == boolean_true_node)
        {
          warning_at (expr_loc, 0, "%<num_teams%> value must be positive");
          t = integer_one_node;
        }

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TEAMS, "num_teams");

      c = build_omp_clause (num_teams_loc, OMP_CLAUSE_NUM_TEAMS);
      OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenMP 4.0:
   thread_limit ( expression )  */

static tree
c_parser_omp_clause_thread_limit (c_parser *parser, tree list)
{
  location_t num_thread_limit_loc = c_parser_peek_token (parser)->location;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      tree c, t = c_parser_expression (parser).value;
      mark_exp_read (t);
      t = c_fully_fold (t, false, NULL);

      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
        {
          c_parser_error (parser, "expected integer expression");
          return list;
        }

      /* Attempt to statically determine when the number isn't positive.
         */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
                           build_int_cst (TREE_TYPE (t), 0));
      if (CAN_HAVE_LOCATION_P (c))
        SET_EXPR_LOCATION (c, expr_loc);
      if (c == boolean_true_node)
        {
          warning_at (expr_loc, 0, "%<thread_limit%> value must be positive");
          t = integer_one_node;
        }

      check_no_duplicate_clause (list, OMP_CLAUSE_THREAD_LIMIT,
                                 "thread_limit");

      c = build_omp_clause (num_thread_limit_loc, OMP_CLAUSE_THREAD_LIMIT);
      OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenMP 4.0:
   aligned ( variable-list )
   aligned ( variable-list : constant-expression )  */

static tree
c_parser_omp_clause_aligned (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  nl = c_parser_omp_variable_list (parser, clause_loc,
                                   OMP_CLAUSE_ALIGNED, list);

  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      tree alignment = c_parser_expr_no_commas (parser, NULL).value;
      mark_exp_read (alignment);
      alignment = c_fully_fold (alignment, false, NULL);

      /* Alignment must be a positive integer constant.  */
      if (TREE_CODE (alignment) != INTEGER_CST
          || !INTEGRAL_TYPE_P (TREE_TYPE (alignment))
          || tree_int_cst_sgn (alignment) != 1)
        {
          error_at (clause_loc, "%<aligned%> clause alignment expression must "
                    "be positive constant integer expression");
          alignment = NULL_TREE;
        }

      for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
        OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = alignment;
    }

  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return nl;
}

/* OpenMP 4.0:
   linear ( variable-list )
   linear ( variable-list : expression )  */

static tree
c_parser_omp_clause_linear (c_parser *parser, tree list, bool is_cilk_simd_fn)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c, step;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  nl = c_parser_omp_variable_list (parser, clause_loc,
                                   OMP_CLAUSE_LINEAR, list);

  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      step = c_parser_expression (parser).value;
      mark_exp_read (step);
      step = c_fully_fold (step, false, NULL);
      /* Cilk Plus simd functions cannot yet use a parameter as step.  */
      if (is_cilk_simd_fn && TREE_CODE (step) == PARM_DECL)
        {
          sorry ("using parameters for %<linear%> step is not supported yet");
          step = integer_one_node;
        }
      if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
        {
          error_at (clause_loc, "%<linear%> clause step expression must "
                    "be integral");
          step = integer_one_node;
        }

    }
  else
    step = integer_one_node;

  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    {
      OMP_CLAUSE_LINEAR_STEP (c) = step;
    }

  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return nl;
}

/* OpenMP 4.0:
   safelen ( constant-expression )  */

static tree
c_parser_omp_clause_safelen (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree c, t;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  t = c_parser_expr_no_commas (parser, NULL).value;
  mark_exp_read (t);
  t = c_fully_fold (t, false, NULL);
  if (TREE_CODE (t) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (t))
      || tree_int_cst_sgn (t) != 1)
    {
      error_at (clause_loc, "%<safelen%> clause expression must "
                "be positive constant integer expression");
      t = NULL_TREE;
    }

  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  if (t == NULL_TREE || t == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SAFELEN, "safelen");

  c = build_omp_clause (clause_loc, OMP_CLAUSE_SAFELEN);
  OMP_CLAUSE_SAFELEN_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 4.0:
   simdlen ( constant-expression )  */

static tree
c_parser_omp_clause_simdlen (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree c, t;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  t = c_parser_expr_no_commas (parser, NULL).value;
  mark_exp_read (t);
  t = c_fully_fold (t, false, NULL);
  if (TREE_CODE (t) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (t))
      || tree_int_cst_sgn (t) != 1)
    {
      error_at (clause_loc, "%<simdlen%> clause expression must "
                "be positive constant integer expression");
      t = NULL_TREE;
    }

  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  if (t == NULL_TREE || t == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SIMDLEN, "simdlen");

  c = build_omp_clause (clause_loc, OMP_CLAUSE_SIMDLEN);
  OMP_CLAUSE_SIMDLEN_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 4.0:
   depend ( depend-kind: variable-list )

   depend-kind:
     in | out | inout  */

static tree
c_parser_omp_clause_depend (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_INOUT;
  tree nl, c;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("in", p) == 0)
        kind = OMP_CLAUSE_DEPEND_IN;
      else if (strcmp ("inout", p) == 0)
        kind = OMP_CLAUSE_DEPEND_INOUT;
      else if (strcmp ("out", p) == 0)
        kind = OMP_CLAUSE_DEPEND_OUT;
      else
        goto invalid_kind;
    }
  else
    goto invalid_kind;

  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    goto resync_fail;

  nl = c_parser_omp_variable_list (parser, clause_loc,
                                   OMP_CLAUSE_DEPEND, list);

  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_DEPEND_KIND (c) = kind;

  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return nl;

 invalid_kind:
  c_parser_error (parser, "invalid depend kind");
  /* Fall through to resynchronize on the closing paren.  */
 resync_fail:
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
  return list;
}

/* OpenMP 4.0:
   map ( map-kind: variable-list )
   map ( variable-list )

   map-kind:
     alloc | to | from | tofrom  */

static tree
c_parser_omp_clause_map (c_parser *parser, tree list)
{
  location_t clause_loc
= c_parser_peek_token (parser)->location; enum gomp_map_kind kind = GOMP_MAP_TOFROM; tree nl, c; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp ("alloc", p) == 0) kind = GOMP_MAP_ALLOC; else if (strcmp ("to", p) == 0) kind = GOMP_MAP_TO; else if (strcmp ("from", p) == 0) kind = GOMP_MAP_FROM; else if (strcmp ("tofrom", p) == 0) kind = GOMP_MAP_TOFROM; else { c_parser_error (parser, "invalid map kind"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return list; } c_parser_consume_token (parser); c_parser_consume_token (parser); } nl = c_parser_omp_variable_list (parser, clause_loc, OMP_CLAUSE_MAP, list); for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_SET_MAP_KIND (c, kind); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return nl; } /* OpenMP 4.0: device ( expression ) */ static tree c_parser_omp_clause_device (c_parser *parser, tree list) { location_t clause_loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { tree c, t = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (t); t = c_fully_fold (t, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_DEVICE, "device"); c = build_omp_clause (clause_loc, OMP_CLAUSE_DEVICE); OMP_CLAUSE_DEVICE_ID (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 4.0: dist_schedule ( static ) dist_schedule ( static , expression ) */ static tree c_parser_omp_clause_dist_schedule (c_parser *parser, tree list) { tree c, t = NULL_TREE; location_t loc = c_parser_peek_token 
(parser)->location; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; if (!c_parser_next_token_is_keyword (parser, RID_STATIC)) { c_parser_error (parser, "invalid dist_schedule kind"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return list; } c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); t = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (t); t = c_fully_fold (t, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<,%> or %<)%>"); check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule"); if (t == error_mark_node) return list; c = build_omp_clause (loc, OMP_CLAUSE_DIST_SCHEDULE); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: proc_bind ( proc-bind-kind ) proc-bind-kind: master | close | spread */ static tree c_parser_omp_clause_proc_bind (c_parser *parser, tree list) { location_t clause_loc = c_parser_peek_token (parser)->location; enum omp_clause_proc_bind_kind kind; tree c; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp ("master", p) == 0) kind = OMP_CLAUSE_PROC_BIND_MASTER; else if (strcmp ("close", p) == 0) kind = OMP_CLAUSE_PROC_BIND_CLOSE; else if (strcmp ("spread", p) == 0) kind = OMP_CLAUSE_PROC_BIND_SPREAD; else goto invalid_kind; } else goto invalid_kind; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); c = build_omp_clause (clause_loc, OMP_CLAUSE_PROC_BIND); OMP_CLAUSE_PROC_BIND_KIND (c) = kind; OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: c_parser_error (parser, "invalid proc_bind kind"); c_parser_skip_until_found (parser, 
CPP_CLOSE_PAREN, "expected %<)%>");
  return list;
}

/* OpenMP 4.0:
   to ( variable-list )  */

static tree
c_parser_omp_clause_to (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO, list);
}

/* OpenMP 4.0:
   from ( variable-list )  */

static tree
c_parser_omp_clause_from (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FROM, list);
}

/* OpenMP 4.0:
   uniform ( variable-list )  */

static tree
c_parser_omp_clause_uniform (c_parser *parser, tree list)
{
  /* The clauses location.  */
  location_t loc = c_parser_peek_token (parser)->location;

  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      list = c_parser_omp_variable_list (parser, loc, OMP_CLAUSE_UNIFORM,
                                         list);
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");
    }
  return list;
}

/* Parse all OpenACC clauses.  The set clauses allowed by the directive
   is a bitmask in MASK.  Return the list of clauses found.  */

static tree
c_parser_oacc_all_clauses (c_parser *parser, omp_clause_mask mask,
                           const char *where, bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the list head so an invalid clause can be dropped.  */
      tree prev = clauses;

      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
        c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);

      switch (c_kind)
        {
        case PRAGMA_OACC_CLAUSE_ASYNC:
          clauses = c_parser_oacc_clause_async (parser, clauses);
          c_name = "async";
          break;
        case PRAGMA_OACC_CLAUSE_COLLAPSE:
          clauses = c_parser_omp_clause_collapse (parser, clauses);
          c_name = "collapse";
          break;
        case PRAGMA_OACC_CLAUSE_COPY:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "copy";
          break;
        case PRAGMA_OACC_CLAUSE_COPYIN:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OACC_CLAUSE_COPYOUT:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "copyout";
          break;
        case PRAGMA_OACC_CLAUSE_CREATE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "create";
          break;
        case PRAGMA_OACC_CLAUSE_DELETE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "delete";
          break;
        case PRAGMA_OACC_CLAUSE_DEVICE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "device";
          break;
        case PRAGMA_OACC_CLAUSE_DEVICEPTR:
          clauses = c_parser_oacc_data_clause_deviceptr (parser, clauses);
          c_name = "deviceptr";
          break;
        case PRAGMA_OACC_CLAUSE_FIRSTPRIVATE:
          clauses = c_parser_omp_clause_firstprivate (parser, clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OACC_CLAUSE_HOST:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "host";
          break;
        case PRAGMA_OACC_CLAUSE_IF:
          clauses = c_parser_omp_clause_if (parser, clauses);
          c_name = "if";
          break;
        case PRAGMA_OACC_CLAUSE_NUM_GANGS:
          clauses = c_parser_omp_clause_num_gangs (parser, clauses);
          c_name = "num_gangs";
          break;
        case PRAGMA_OACC_CLAUSE_NUM_WORKERS:
          clauses = c_parser_omp_clause_num_workers (parser, clauses);
          c_name = "num_workers";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_copy";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_copyin";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_copyout";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_create";
          break;
        case PRAGMA_OACC_CLAUSE_PRIVATE:
          clauses = c_parser_omp_clause_private (parser, clauses);
          c_name = "private";
          break;
        case PRAGMA_OACC_CLAUSE_REDUCTION:
          clauses = c_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OACC_CLAUSE_SELF:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "self";
          break;
        case PRAGMA_OACC_CLAUSE_VECTOR_LENGTH:
          clauses = c_parser_omp_clause_vector_length (parser, clauses);
          c_name = "vector_length";
          break;
        case PRAGMA_OACC_CLAUSE_WAIT:
          clauses = c_parser_oacc_clause_wait (parser, clauses);
          c_name = "wait";
          break;
        default:
          c_parser_error (parser, "expected %<#pragma acc%> clause");
          goto saw_error;
        }

      first = false;

      if (((mask >> c_kind) & 1) == 0)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.  */
          clauses = prev;
          error_at (here, "%qs is not valid for %qs", c_name, where);
        }
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);

  if (finish_p)
    return c_finish_omp_clauses (clauses);

  return clauses;
}

/* Parse all OpenMP clauses.  The set clauses allowed by the directive
   is a bitmask in MASK.  Return the list of clauses found.
   */

static tree
c_parser_omp_all_clauses (c_parser *parser, omp_clause_mask mask,
                          const char *where, bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true, cilk_simd_fn = false;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the list head so an invalid clause can be dropped.  */
      tree prev = clauses;

      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
        c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);

      switch (c_kind)
        {
        case PRAGMA_OMP_CLAUSE_COLLAPSE:
          clauses = c_parser_omp_clause_collapse (parser, clauses);
          c_name = "collapse";
          break;
        case PRAGMA_OMP_CLAUSE_COPYIN:
          clauses = c_parser_omp_clause_copyin (parser, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
          clauses = c_parser_omp_clause_copyprivate (parser, clauses);
          c_name = "copyprivate";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULT:
          clauses = c_parser_omp_clause_default (parser, clauses);
          c_name = "default";
          break;
        case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
          clauses = c_parser_omp_clause_firstprivate (parser, clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OMP_CLAUSE_FINAL:
          clauses = c_parser_omp_clause_final (parser, clauses);
          c_name = "final";
          break;
        case PRAGMA_OMP_CLAUSE_IF:
          clauses = c_parser_omp_clause_if (parser, clauses);
          c_name = "if";
          break;
        case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
          clauses = c_parser_omp_clause_lastprivate (parser, clauses);
          c_name = "lastprivate";
          break;
        case PRAGMA_OMP_CLAUSE_MERGEABLE:
          clauses = c_parser_omp_clause_mergeable (parser, clauses);
          c_name = "mergeable";
          break;
        case PRAGMA_OMP_CLAUSE_NOWAIT:
          clauses = c_parser_omp_clause_nowait (parser, clauses);
          c_name = "nowait";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_THREADS:
          clauses = c_parser_omp_clause_num_threads (parser, clauses);
          c_name = "num_threads";
          break;
        case PRAGMA_OMP_CLAUSE_ORDERED:
          clauses = c_parser_omp_clause_ordered (parser, clauses);
          c_name = "ordered";
          break;
        case PRAGMA_OMP_CLAUSE_PRIVATE:
          clauses = c_parser_omp_clause_private (parser, clauses);
          c_name = "private";
          break;
        case PRAGMA_OMP_CLAUSE_REDUCTION:
          clauses = c_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OMP_CLAUSE_SCHEDULE:
          clauses = c_parser_omp_clause_schedule (parser, clauses);
          c_name = "schedule";
          break;
        case PRAGMA_OMP_CLAUSE_SHARED:
          clauses = c_parser_omp_clause_shared (parser, clauses);
          c_name = "shared";
          break;
        case PRAGMA_OMP_CLAUSE_UNTIED:
          clauses = c_parser_omp_clause_untied (parser, clauses);
          c_name = "untied";
          break;
        /* Cilk Plus mask/nomask share the OpenMP in/notinbranch codes.  */
        case PRAGMA_OMP_CLAUSE_INBRANCH:
        case PRAGMA_CILK_CLAUSE_MASK:
          clauses = c_parser_omp_clause_branch (parser, OMP_CLAUSE_INBRANCH,
                                                clauses);
          c_name = "inbranch";
          break;
        case PRAGMA_OMP_CLAUSE_NOTINBRANCH:
        case PRAGMA_CILK_CLAUSE_NOMASK:
          clauses = c_parser_omp_clause_branch (parser, OMP_CLAUSE_NOTINBRANCH,
                                                clauses);
          c_name = "notinbranch";
          break;
        case PRAGMA_OMP_CLAUSE_PARALLEL:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_PARALLEL,
                                              clauses);
          c_name = "parallel";
          if (!first)
            {
              /* Cancel kinds must come first; shared by the cases below.  */
             clause_not_first:
              error_at (here, "%qs must be the first clause of %qs",
                        c_name, where);
              clauses = prev;
            }
          break;
        case PRAGMA_OMP_CLAUSE_FOR:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_FOR,
                                              clauses);
          c_name = "for";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_SECTIONS:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_SECTIONS,
                                              clauses);
          c_name = "sections";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_TASKGROUP:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_TASKGROUP,
                                              clauses);
          c_name = "taskgroup";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_TO:
          clauses = c_parser_omp_clause_to (parser, clauses);
          c_name = "to";
          break;
        case PRAGMA_OMP_CLAUSE_FROM:
          clauses = c_parser_omp_clause_from (parser, clauses);
          c_name = "from";
          break;
        case PRAGMA_OMP_CLAUSE_UNIFORM:
          clauses = c_parser_omp_clause_uniform (parser, clauses);
          c_name = "uniform";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_TEAMS:
          clauses = c_parser_omp_clause_num_teams (parser, clauses);
          c_name = "num_teams";
          break;
        case PRAGMA_OMP_CLAUSE_THREAD_LIMIT:
          clauses = c_parser_omp_clause_thread_limit (parser, clauses);
          c_name = "thread_limit";
          break;
        case PRAGMA_OMP_CLAUSE_ALIGNED:
          clauses = c_parser_omp_clause_aligned (parser, clauses);
          c_name = "aligned";
          break;
        case PRAGMA_OMP_CLAUSE_LINEAR:
          /* A vectorlength clause in MASK means we are parsing a Cilk
             Plus simd function attribute.  */
          if (((mask >> PRAGMA_CILK_CLAUSE_VECTORLENGTH) & 1) != 0)
            cilk_simd_fn = true;
          clauses = c_parser_omp_clause_linear (parser, clauses, cilk_simd_fn);
          c_name = "linear";
          break;
        case PRAGMA_OMP_CLAUSE_DEPEND:
          clauses = c_parser_omp_clause_depend (parser, clauses);
          c_name = "depend";
          break;
        case PRAGMA_OMP_CLAUSE_MAP:
          clauses = c_parser_omp_clause_map (parser, clauses);
          c_name = "map";
          break;
        case PRAGMA_OMP_CLAUSE_DEVICE:
          clauses = c_parser_omp_clause_device (parser, clauses);
          c_name = "device";
          break;
        case PRAGMA_OMP_CLAUSE_DIST_SCHEDULE:
          clauses = c_parser_omp_clause_dist_schedule (parser, clauses);
          c_name = "dist_schedule";
          break;
        case PRAGMA_OMP_CLAUSE_PROC_BIND:
          clauses = c_parser_omp_clause_proc_bind (parser, clauses);
          c_name = "proc_bind";
          break;
        case PRAGMA_OMP_CLAUSE_SAFELEN:
          clauses = c_parser_omp_clause_safelen (parser, clauses);
          c_name = "safelen";
          break;
        case PRAGMA_CILK_CLAUSE_VECTORLENGTH:
          clauses = c_parser_cilk_clause_vectorlength (parser, clauses, true);
          c_name = "simdlen";
          break;
        case PRAGMA_OMP_CLAUSE_SIMDLEN:
          clauses = c_parser_omp_clause_simdlen (parser, clauses);
          c_name = "simdlen";
          break;
        default:
          c_parser_error (parser, "expected %<#pragma omp%> clause");
          goto saw_error;
        }

      first = false;

      if (((mask >> c_kind) & 1) == 0)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.
             */
          clauses = prev;
          error_at (here, "%qs is not valid for %qs", c_name, where);
        }
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);

  if (finish_p)
    return c_finish_omp_clauses (clauses);

  return clauses;
}

/* OpenACC 2.0, OpenMP 2.5:

   structured-block:
     statement

   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   c_parser_statement calls add_stmt.  */

static tree
c_parser_omp_structured_block (c_parser *parser)
{
  tree stmt = push_stmt_list ();
  c_parser_statement (parser);
  return pop_stmt_list (stmt);
}

/* OpenACC 2.0:
   # pragma acc cache (variable-list) new-line

   LOC is the location of the #pragma token.  */

static tree
c_parser_oacc_cache (location_t loc, c_parser *parser)
{
  tree stmt, clauses;

  clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE__CACHE_, NULL);
  clauses = c_finish_omp_clauses (clauses);

  c_parser_skip_to_pragma_eol (parser);

  stmt = make_node (OACC_CACHE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_CACHE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);

  return stmt;
}

/* OpenACC 2.0:
   # pragma acc data oacc-data-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma token.
*/ #define OACC_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) ) static tree c_parser_oacc_data (location_t loc, c_parser *parser) { tree stmt, clauses, block; clauses = c_parser_oacc_all_clauses (parser, OACC_DATA_CLAUSE_MASK, "#pragma acc data"); block = c_begin_omp_parallel (); add_stmt (c_parser_omp_structured_block (parser)); stmt = c_finish_oacc_data (loc, clauses, block); return stmt; } /* OpenACC 2.0: # pragma acc kernels oacc-kernels-clause[optseq] new-line structured-block LOC is the location of the #pragma token. 
*/ #define OACC_KERNELS_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) ) static tree c_parser_oacc_kernels (location_t loc, c_parser *parser, char *p_name) { tree stmt, clauses = NULL_TREE, block; strcat (p_name, " kernels"); if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "loop") == 0) { c_parser_consume_token (parser); block = c_begin_omp_parallel (); c_parser_oacc_loop (loc, parser, p_name); stmt = c_finish_oacc_kernels (loc, clauses, block); OACC_KERNELS_COMBINED (stmt) = 1; return stmt; } } clauses = c_parser_oacc_all_clauses (parser, OACC_KERNELS_CLAUSE_MASK, p_name); block = c_begin_omp_parallel (); add_stmt (c_parser_omp_structured_block (parser)); stmt = c_finish_oacc_kernels (loc, clauses, block); return stmt; } /* OpenACC 2.0: # pragma acc enter data oacc-enter-data-clause[optseq] new-line or # pragma acc exit data oacc-exit-data-clause[optseq] new-line LOC is the location of the #pragma token. 
*/

/* Clauses accepted on "#pragma acc enter data".  */
#define OACC_ENTER_DATA_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

/* Clauses accepted on "#pragma acc exit data".  */
#define OACC_EXIT_DATA_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DELETE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

/* Parse "#pragma acc enter data" (ENTER true) or "#pragma acc exit
   data" (ENTER false).  The pragma must carry at least one data
   movement (map) clause; otherwise an error is emitted and no
   statement is added.  */

static void
c_parser_oacc_enter_exit_data (c_parser *parser, bool enter)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree clauses, stmt;

  c_parser_consume_pragma (parser);

  /* The "data" keyword itself is a separate token after "enter"/"exit".  */
  if (!c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, enter
		      ? "expected %<data%> in %<#pragma acc enter data%>"
		      : "expected %<data%> in %<#pragma acc exit data%>");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }

  const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
  if (strcmp (p, "data") != 0)
    {
      c_parser_error (parser, "invalid pragma");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }

  c_parser_consume_token (parser);

  if (enter)
    clauses = c_parser_oacc_all_clauses (parser, OACC_ENTER_DATA_CLAUSE_MASK,
					 "#pragma acc enter data");
  else
    clauses = c_parser_oacc_all_clauses (parser, OACC_EXIT_DATA_CLAUSE_MASK,
					 "#pragma acc exit data");

  /* All the data-movement clauses above lower to OMP_CLAUSE_MAP, so an
     empty map chain means no data movement was requested.  */
  if (find_omp_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (loc, enter
		? "%<#pragma acc enter data%> has no data movement clause"
		: "%<#pragma acc exit data%> has no data movement clause");
      return;
    }

  stmt = enter ? make_node (OACC_ENTER_DATA) : make_node (OACC_EXIT_DATA);
  TREE_TYPE (stmt) = void_type_node;
  if (enter)
    OACC_ENTER_DATA_CLAUSES (stmt) = clauses;
  else
    OACC_EXIT_DATA_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
}

/* OpenACC 2.0:

   # pragma acc loop oacc-loop-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma token.  */

/* Clauses accepted on "#pragma acc loop".  */
#define OACC_LOOP_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COLLAPSE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION) )

/* Parse an OpenACC loop construct; the loop itself is handled by the
   shared restricted-for parser (c_parser_omp_for_loop) with code
   OACC_LOOP.  Also used for the "kernels loop"/"parallel loop"
   combined forms, where P_NAME already names the outer construct.  */

static tree
c_parser_oacc_loop (location_t loc, c_parser *parser, char *p_name)
{
  tree stmt, clauses, block;

  strcat (p_name, " loop");

  clauses = c_parser_oacc_all_clauses (parser, OACC_LOOP_CLAUSE_MASK, p_name);

  block = c_begin_compound_stmt (true);
  stmt = c_parser_omp_for_loop (loc, parser, OACC_LOOP, clauses, NULL);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);

  return stmt;
}

/* OpenACC 2.0:
   # pragma acc parallel oacc-parallel-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma token.
*/

/* Clauses accepted on "#pragma acc parallel".  */
#define OACC_PARALLEL_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

/* Parse an OpenACC parallel construct.  Mirrors c_parser_oacc_kernels:
   handles the combined "parallel loop" form (clauses stay NULL_TREE and
   are parsed by c_parser_oacc_loop), otherwise parses clauses and a
   structured block.  */

static tree
c_parser_oacc_parallel (location_t loc, c_parser *parser, char *p_name)
{
  tree stmt, clauses = NULL_TREE, block;

  strcat (p_name, " parallel");

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "loop") == 0)
	{
	  /* Combined "#pragma acc parallel loop" construct.  */
	  c_parser_consume_token (parser);
	  block = c_begin_omp_parallel ();
	  c_parser_oacc_loop (loc, parser, p_name);
	  stmt = c_finish_oacc_parallel (loc, clauses, block);
	  OACC_PARALLEL_COMBINED (stmt) = 1;
	  return stmt;
	}
    }

  clauses = c_parser_oacc_all_clauses (parser, OACC_PARALLEL_CLAUSE_MASK,
				       p_name);

  block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser));

  stmt = c_finish_oacc_parallel (loc, clauses, block);

  return stmt;
}

/* OpenACC 2.0:
   # pragma acc update oacc-update-clause[optseq] new-line
*/

/* Clauses accepted on "#pragma acc update".  */
#define OACC_UPDATE_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_HOST)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SELF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

/* Parse "#pragma acc update".  At least one device/host/self clause
   (all lowered to OMP_CLAUSE_MAP) is required.  */

static void
c_parser_oacc_update (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);

  tree clauses = c_parser_oacc_all_clauses (parser, OACC_UPDATE_CLAUSE_MASK,
					    "#pragma acc update");
  if (find_omp_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (loc,
		"%<#pragma acc update%> must contain at least one "
		"%<device%> or %<host/self%> clause");
      return;
    }

  if (parser->error)
    return;

  tree stmt = make_node (OACC_UPDATE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_UPDATE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
}

/* OpenACC 2.0:

   # pragma acc wait [(intseq)] oacc-wait-clause[optseq] new-line

   LOC is the location of the #pragma token.  */

/* Clauses accepted on "#pragma acc wait".  */
#define OACC_WAIT_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) )

/* Parse "#pragma acc wait".  The optional parenthesized integer list
   names the async queues to wait on.  NOTE(review): this handler uses
   strcpy on P_NAME where the other handlers use strcat — confirm the
   overwrite (rather than append) is intentional.  */

static tree
c_parser_oacc_wait (location_t loc, c_parser *parser, char *p_name)
{
  tree clauses, list = NULL_TREE, stmt = NULL_TREE;

  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    list = c_parser_oacc_wait_list (parser, loc, list);

  strcpy (p_name, " wait");
  clauses = c_parser_oacc_all_clauses (parser, OACC_WAIT_CLAUSE_MASK, p_name);
  stmt = c_finish_oacc_wait (loc, list, clauses);

  return stmt;
}

/* OpenMP 2.5:
   # pragma omp atomic new-line
     expression-stmt

   expression-stmt:
     x binop= expr | x++ | ++x | x-- | --x
   binop:
     +, *, -, /, &, ^, |, <<, >>

  where x is an lvalue expression with scalar type.
   OpenMP 3.1:
   # pragma omp atomic new-line
     update-stmt

   # pragma omp atomic read new-line
     read-stmt

   # pragma omp atomic write new-line
     write-stmt

   # pragma omp atomic update new-line
     update-stmt

   # pragma omp atomic capture new-line
     capture-stmt

   # pragma omp atomic capture new-line
     capture-block

   read-stmt:
     v = x
   write-stmt:
     x = expr
   update-stmt:
     expression-stmt | x = x binop expr
   capture-stmt:
     v = expression-stmt
   capture-block:
     { v = x; update-stmt; } | { update-stmt; v = x; }

   OpenMP 4.0:
   update-stmt:
     expression-stmt | x = x binop expr | x = expr binop x
   capture-stmt:
     v = update-stmt
   capture-block:
     { v = x; update-stmt; } | { update-stmt; v = x; } | { v = x; x = expr; }

  where x and v are lvalue expressions with scalar type.

  LOC is the location of the #pragma token.  */

static void
c_parser_omp_atomic (location_t loc, c_parser *parser)
{
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE;
  tree lhs1 = NULL_TREE, rhs1 = NULL_TREE;
  tree stmt, orig_lhs, unfolded_lhs = NULL_TREE, unfolded_lhs1 = NULL_TREE;
  /* CODE selects the atomic kind; OMP_ATOMIC is "update", and
     NOP_EXPR temporarily stands for "atomic write" until lowered
     below.  OPCODE is the binary operation of an update.  */
  enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR;
  struct c_expr expr;
  location_t eloc;
  bool structured_block = false;
  bool swapped = false;
  bool seq_cst = false;
  bool non_lvalue_p;

  /* "seq_cst" may appear either before or after the read/write/update/
     capture keyword, optionally comma-separated; handle the leading
     position here and the trailing position below.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (!strcmp (p, "seq_cst"))
	{
	  seq_cst = true;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA)
	      && c_parser_peek_2nd_token (parser)->type == CPP_NAME)
	    c_parser_consume_token (parser);
	}
    }
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      if (!strcmp (p, "read"))
	code = OMP_ATOMIC_READ;
      else if (!strcmp (p, "write"))
	code = NOP_EXPR;
      else if (!strcmp (p, "update"))
	code = OMP_ATOMIC;
      else if (!strcmp (p, "capture"))
	code = OMP_ATOMIC_CAPTURE_NEW;
      else
	p = NULL;
      if (p)
	c_parser_consume_token (parser);
    }
  if (!seq_cst)
    {
      if (c_parser_next_token_is (parser, CPP_COMMA)
	  && c_parser_peek_2nd_token (parser)->type == CPP_NAME)
	c_parser_consume_token (parser);

      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  const char *p
	    = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	  if (!strcmp (p, "seq_cst"))
	    {
	      seq_cst = true;
	      c_parser_consume_token (parser);
	    }
	}
    }
  c_parser_skip_to_pragma_eol (parser);

  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      /* Both forms start with "v = ..."; parse the target lvalue.  */
      v = c_parser_cast_expression (parser, NULL).value;
      non_lvalue_p = !lvalue_p (v);
      v = c_fully_fold (v, false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (non_lvalue_p)
	v = non_lvalue (v);
      loc = c_parser_peek_token (parser)->location;
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	goto saw_error;
      if (code == NOP_EXPR)
	{
	  /* Write: the RHS is a full expression.  */
	  lhs = c_parser_expression (parser).value;
	  lhs = c_fully_fold (lhs, false, NULL);
	  if (lhs == error_mark_node)
	    goto saw_error;
	}
      else
	{
	  /* Read: the RHS must itself be an lvalue (the atomic x).  */
	  lhs = c_parser_cast_expression (parser, NULL).value;
	  non_lvalue_p = !lvalue_p (lhs);
	  lhs = c_fully_fold (lhs, false, NULL);
	  if (lhs == error_mark_node)
	    goto saw_error;
	  if (non_lvalue_p)
	    lhs = non_lvalue (lhs);
	}
      if (code == NOP_EXPR)
	{
	  /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
	     opcode.  */
	  code = OMP_ATOMIC;
	  rhs = lhs;
	  lhs = v;
	  v = NULL_TREE;
	}
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* capture-block form; which of x's values is captured is
	     decided later.  */
	  c_parser_consume_token (parser);
	  structured_block = true;
	}
      else
	{
	  /* capture-stmt form: "v = update-stmt".  */
	  v = c_parser_cast_expression (parser, NULL).value;
	  non_lvalue_p = !lvalue_p (v);
	  v = c_fully_fold (v, false, NULL);
	  if (v == error_mark_node)
	    goto saw_error;
	  if (non_lvalue_p)
	    v = non_lvalue (v);
	  if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	    goto saw_error;
	}
      break;
    default:
      break;
    }

  /* For structured_block case we don't know yet whether
     old or new x should be captured.  */
restart:
  eloc = c_parser_peek_token (parser)->location;
  expr = c_parser_cast_expression (parser, NULL);
  lhs = expr.value;
  expr = default_function_array_conversion (eloc, expr);
  unfolded_lhs = expr.value;
  lhs = c_fully_fold (lhs, false, NULL);
  orig_lhs = lhs;
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
    saw_error:
      c_parser_skip_to_end_of_block_or_statement (parser);
      if (structured_block)
	{
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    c_parser_consume_token (parser);
	  else if (code == OMP_ATOMIC_CAPTURE_NEW)
	    {
	      /* A capture-block may contain a second statement; skip
		 it too before consuming the closing brace.  */
	      c_parser_skip_to_end_of_block_or_statement (parser);
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		c_parser_consume_token (parser);
	    }
	}
      return;

    case POSTINCREMENT_EXPR:
      /* "v = x++" captures the old value of x.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      unfolded_lhs = NULL_TREE;
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;

    case POSTDECREMENT_EXPR:
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      unfolded_lhs = NULL_TREE;
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;

    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					 (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      unfolded_lhs = NULL_TREE;
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	  if (TREE_CODE (TREE_OPERAND (lhs, 1)) == TRUTH_NOT_EXPR
	      && TREE_OPERAND (lhs, 0)
		 == TREE_OPERAND (TREE_OPERAND (lhs, 1), 0))
	    {
	      /* This is pre or post decrement.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      unfolded_lhs = NULL_TREE;
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      if (!lvalue_p (unfolded_lhs))
	lhs = non_lvalue (lhs);
      /* The next token determines the update form: either a compound
	 assignment operator, or plain "=" with "x binop expr" /
	 "expr binop x" on the RHS.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT_EQ:
	  opcode = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  opcode = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  opcode = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  opcode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  opcode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  opcode = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  opcode = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  opcode = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  opcode = BIT_XOR_EXPR;
	  break;
	case CPP_EQ:
	  c_parser_consume_token (parser);
	  eloc = c_parser_peek_token (parser)->location;
	  expr = c_parser_expr_no_commas (parser, NULL, unfolded_lhs);
	  rhs1 = expr.value;
	  switch (TREE_CODE (rhs1))
	    {
	    case MULT_EXPR:
	    case TRUNC_DIV_EXPR:
	    case RDIV_EXPR:
	    case PLUS_EXPR:
	    case MINUS_EXPR:
	    case LSHIFT_EXPR:
	    case RSHIFT_EXPR:
	    case BIT_AND_EXPR:
	    case BIT_IOR_EXPR:
	    case BIT_XOR_EXPR:
	      if (c_tree_equal (TREE_OPERAND (rhs1, 0), unfolded_lhs))
		{
		  /* "x = x binop expr" form.  */
		  opcode = TREE_CODE (rhs1);
		  rhs = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL);
		  rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL);
		  goto stmt_done;
		}
	      if (c_tree_equal (TREE_OPERAND (rhs1, 1), unfolded_lhs))
		{
		  /* "x = expr binop x" (OpenMP 4.0); record SWAPPED so
		     non-commutative operators are finished correctly.  */
		  opcode = TREE_CODE (rhs1);
		  rhs = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL);
		  rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL);
		  swapped = !commutative_tree_code (opcode);
		  goto stmt_done;
		}
	      break;
	    case ERROR_MARK:
	      goto saw_error;
	    default:
	      break;
	    }
	  if (c_parser_peek_token (parser)->type == CPP_SEMICOLON)
	    {
	      if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
		{
		  /* First statement of a capture-block was "v = x";
		     reparse the second statement as the update.  */
		  code = OMP_ATOMIC_CAPTURE_OLD;
		  v = lhs;
		  lhs = NULL_TREE;
		  expr = default_function_array_read_conversion (eloc, expr);
		  unfolded_lhs1 = expr.value;
		  lhs1 = c_fully_fold (unfolded_lhs1, false, NULL);
		  rhs1 = NULL_TREE;
		  c_parser_consume_token (parser);
		  goto restart;
		}
	      if (structured_block)
		{
		  /* Plain "x = expr" inside a capture-block.  */
		  opcode = NOP_EXPR;
		  expr = default_function_array_read_conversion (eloc, expr);
		  rhs = c_fully_fold (expr.value, false, NULL);
		  rhs1 = NULL_TREE;
		  goto stmt_done;
		}
	    }
	  c_parser_error (parser, "invalid form of %<#pragma omp atomic%>");
	  goto saw_error;
	default:
	  c_parser_error (parser,
			  "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}

      /* Arrange to pass the location of the assignment operator to
	 c_finish_omp_atomic.  */
      loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      eloc = c_parser_peek_token (parser)->location;
      expr = c_parser_expression (parser);
      expr = default_function_array_read_conversion (eloc, expr);
      rhs = expr.value;
      rhs = c_fully_fold (rhs, false, NULL);
      break;
    }
stmt_done:
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      /* Parse the trailing "v = x;" of a { update-stmt; v = x; }
	 capture-block.  */
      if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
	goto saw_error;
      v = c_parser_cast_expression (parser, NULL).value;
      non_lvalue_p = !lvalue_p (v);
      v = c_fully_fold (v, false, NULL);
      if (v == error_mark_node)
	goto saw_error;
      if (non_lvalue_p)
	v = non_lvalue (v);
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	goto saw_error;
      eloc = c_parser_peek_token (parser)->location;
      expr = c_parser_cast_expression (parser, NULL);
      lhs1 = expr.value;
      expr = default_function_array_read_conversion (eloc, expr);
      unfolded_lhs1 = expr.value;
      lhs1 = c_fully_fold (lhs1, false, NULL);
      if (lhs1 == error_mark_node)
	goto saw_error;
      if (!lvalue_p (unfolded_lhs1))
	lhs1 = non_lvalue (lhs1);
    }
  if (structured_block)
    {
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      c_parser_require (parser, CPP_CLOSE_BRACE, "expected %<}%>");
    }
done:
  /* Both statements of a capture-block must name the same x.  */
  if (unfolded_lhs && unfolded_lhs1
      && !c_tree_equal (unfolded_lhs, unfolded_lhs1))
    {
      error ("%<#pragma omp atomic capture%> uses two different "
	     "expressions for memory");
      stmt = error_mark_node;
    }
  else
    stmt = c_finish_omp_atomic (loc, code, opcode, lhs, rhs, v, lhs1, rhs1,
				swapped, seq_cst);
  if (stmt != error_mark_node)
    add_stmt (stmt);

  if (!structured_block)
    c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}


/* OpenMP 2.5:
   # pragma omp barrier new-line
*/

static void
c_parser_omp_barrier (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_barrier (loc);
}

/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block

  LOC is the location of the #pragma itself.  */

static tree
c_parser_omp_critical (location_t loc, c_parser *parser)
{
  tree stmt, name = NULL;

  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
	}
      else
	c_parser_error (parser, "expected identifier");
    }
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  stmt = c_parser_omp_structured_block (parser);
  return c_finish_omp_critical (loc, stmt, name);
}

/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list ) */

static void
c_parser_omp_flush (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_flush (loc);
}

/* Parse the restricted form of loop statements allowed by OpenACC and
   OpenMP.  The real trick here is to determine the loop control
   variable early so that we can push a new decl if necessary to make
   it private.  LOC is the location of the "acc" or "omp" in "#pragma
   acc" or "#pragma omp", respectively.  */

static tree
c_parser_omp_for_loop (location_t loc, c_parser *parser, enum tree_code code,
		       tree clauses, tree *cclauses)
{
  tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl;
  tree declv, condv, incrv, initv, ret = NULL_TREE;
  tree pre_body = NULL_TREE, this_pre_body;
  bool fail = false, open_brace_parsed = false;
  int i, collapse = 1, nbraces = 0;
  location_t for_loc;
  vec<tree, va_gc> *for_block = make_tree_vector ();

  /* A collapse(n) clause means N perfectly-nested loops are parsed.  */
  for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
    if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
      collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl));

  gcc_assert (collapse >= 1);

  declv = make_tree_vec (collapse);
  initv = make_tree_vec (collapse);
  condv = make_tree_vec (collapse);
  incrv = make_tree_vec (collapse);

  if (code != CILK_FOR
      && !c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_error (parser, "for statement expected");
      return NULL;
    }
  if (code == CILK_FOR
      && !c_parser_next_token_is_keyword (parser, RID_CILK_FOR))
    {
      c_parser_error (parser, "_Cilk_for statement expected");
      return NULL;
    }
  for_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);

  for (i = 0; i < collapse; i++)
    {
      int bracecount = 0;

      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	goto pop_scopes;

      /* Parse the initialization declaration or expression.  */
      if (c_parser_next_tokens_start_declaration (parser))
	{
	  if (i > 0)
	    vec_safe_push (for_block, c_begin_compound_stmt (true));
	  this_pre_body = push_stmt_list ();
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 NULL, vNULL);
	  if (this_pre_body)
	    {
	      this_pre_body = pop_stmt_list (this_pre_body);
	      if (pre_body)
		{
		  tree t = pre_body;
		  pre_body = push_stmt_list ();
		  add_stmt (t);
		  add_stmt (this_pre_body);
		  pre_body = pop_stmt_list (pre_body);
		}
	      else
		pre_body = this_pre_body;
	    }
	  decl = check_for_loop_decls (for_loc, flag_isoc99);
	  if (decl == NULL)
	    goto error_init;
	  if (DECL_INITIAL (decl) == error_mark_node)
	    decl = error_mark_node;
	  init = decl;
	}
      else if (c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
	{
	  /* "i = expr" initialization of an already-declared variable.  */
	  struct c_expr decl_exp;
	  struct c_expr init_exp;
	  location_t init_loc;

	  decl_exp = c_parser_postfix_expression (parser);
	  decl = decl_exp.value;

	  c_parser_require (parser, CPP_EQ, "expected %<=%>");

	  init_loc = c_parser_peek_token (parser)->location;
	  init_exp = c_parser_expr_no_commas (parser, NULL);
	  init_exp = default_function_array_read_conversion (init_loc,
							     init_exp);
	  init = build_modify_expr (init_loc, decl, decl_exp.original_type,
				    NOP_EXPR, init_loc, init_exp.value,
				    init_exp.original_type);
	  init = c_process_expr_stmt (init_loc, init);

	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      else
	{
	error_init:
	  c_parser_error (parser,
			  "expected iteration declaration or initialization");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  fail = true;
	  goto parse_next;
	}

      /* Parse the loop condition.  */
      cond = NULL_TREE;
      if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
	{
	  location_t cond_loc = c_parser_peek_token (parser)->location;
	  struct c_expr cond_expr
	    = c_parser_binary_expression (parser, NULL, NULL_TREE);

	  cond = cond_expr.value;
	  cond = c_objc_common_truthvalue_conversion (cond_loc, cond);
	  cond = c_fully_fold (cond, false, NULL);
	  switch (cond_expr.original_code)
	    {
	    case GT_EXPR:
	    case GE_EXPR:
	    case LT_EXPR:
	    case LE_EXPR:
	      break;
	    case NE_EXPR:
	      /* != conditions are only valid for Cilk loops.  */
	      if (code == CILK_SIMD || code == CILK_FOR)
		break;
	      /* FALLTHRU.  */
	    default:
	      /* Can't be cond = error_mark_node, because we want to preserve
		 the location until c_finish_omp_for.  */
	      cond = build1 (NOP_EXPR, boolean_type_node, error_mark_node);
	      break;
	    }
	  protected_set_expr_location (cond, cond_loc);
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");

      /* Parse the increment expression.  */
      incr = NULL_TREE;
      if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
	{
	  location_t incr_loc = c_parser_peek_token (parser)->location;

	  incr = c_process_expr_stmt (incr_loc,
				      c_parser_expression (parser).value);
	}
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

      if (decl == NULL || decl == error_mark_node || init == error_mark_node)
	fail = true;
      else
	{
	  TREE_VEC_ELT (declv, i) = decl;
	  TREE_VEC_ELT (initv, i) = init;
	  TREE_VEC_ELT (condv, i) = cond;
	  TREE_VEC_ELT (incrv, i) = incr;
	}

    parse_next:
      if (i == collapse - 1)
	break;

      /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
	 in between the collapsed for loops to be still considered perfectly
	 nested.  Hopefully the final version clarifies this.
	 For now handle (multiple) {'s and empty statements.  */
      do
	{
	  if (c_parser_next_token_is_keyword (parser, RID_FOR))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	    {
	      c_parser_consume_token (parser);
	      bracecount++;
	    }
	  else if (bracecount
		   && c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      c_parser_error (parser, "not enough perfectly nested loops");
	      if (bracecount)
		{
		  open_brace_parsed = true;
		  bracecount--;
		}
	      fail = true;
	      collapse = 0;
	      break;
	    }
	}
      while (1);

      nbraces += bracecount;
    }

  /* Save and replace the break/continue labels around the body so that
     break/continue inside the loop body are handled by this construct.  */
  save_break = c_break_label;
  if (code == CILK_SIMD)
    c_break_label = build_int_cst (size_type_node, 2);
  else
    c_break_label = size_one_node;
  save_cont = c_cont_label;
  c_cont_label = NULL_TREE;
  body = push_stmt_list ();

  if (open_brace_parsed)
    {
      location_t here = c_parser_peek_token (parser)->location;
      stmt = c_begin_compound_stmt (true);
      c_parser_compound_statement_nostart (parser);
      add_stmt (c_end_compound_stmt (here, stmt, true));
    }
  else
    add_stmt (c_parser_c99_block_statement (parser));
  if (c_cont_label)
    {
      tree t = build1 (LABEL_EXPR, void_type_node, c_cont_label);
      SET_EXPR_LOCATION (t, loc);
      add_stmt (t);
    }

  body = pop_stmt_list (body);
  c_break_label = save_break;
  c_cont_label = save_cont;

  /* Consume the closing braces opened between collapsed loops.  */
  while (nbraces)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  nbraces--;
	}
      else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_error (parser, "collapsed loops not perfectly nested");
	  while (nbraces)
	    {
	      location_t here = c_parser_peek_token (parser)->location;
	      stmt = c_begin_compound_stmt (true);
	      add_stmt (body);
	      c_parser_compound_statement_nostart (parser);
	      body = c_end_compound_stmt (here, stmt, true);
	      nbraces--;
	    }
	  goto pop_scopes;
	}
    }

  /* Only bother calling c_finish_omp_for if we haven't already generated
     an error from the initialization parsing.  */
  if (!fail)
    {
      stmt = c_finish_omp_for (loc, code, declv, initv, condv,
			       incrv, body, pre_body);
      if (stmt)
	{
	  /* For combined constructs, iteration variables must not stay
	     firstprivate/lastprivate on the parallel part.  */
	  if (cclauses != NULL
	      && cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] != NULL)
	    {
	      tree *c;
	      for (c = &cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; *c ; )
		if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE
		    && OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE)
		  c = &OMP_CLAUSE_CHAIN (*c);
		else
		  {
		    for (i = 0; i < collapse; i++)
		      if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c))
			break;
		    if (i == collapse)
		      c = &OMP_CLAUSE_CHAIN (*c);
		    else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE)
		      {
			error_at (loc,
				  "iteration variable %qD should not be firstprivate",
				  OMP_CLAUSE_DECL (*c));
			*c = OMP_CLAUSE_CHAIN (*c);
		      }
		    else
		      {
			/* Move lastprivate (decl) clause to OMP_FOR_CLAUSES.  */
			tree l = *c;
			*c = OMP_CLAUSE_CHAIN (*c);
			if (code == OMP_SIMD)
			  {
			    OMP_CLAUSE_CHAIN (l)
			      = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
			    cclauses[C_OMP_CLAUSE_SPLIT_FOR] = l;
			  }
			else
			  {
			    OMP_CLAUSE_CHAIN (l) = clauses;
			    clauses = l;
			  }
		      }
		  }
	    }
	  OMP_FOR_CLAUSES (stmt) = clauses;
	}
      ret = stmt;
    }
pop_scopes:
  while (!for_block->is_empty ())
    {
      /* FIXME diagnostics: LOC below should be the actual location of
	 this particular for block.  We need to build a list of
	 locations to go along with FOR_BLOCK.  */
      stmt = c_end_compound_stmt (loc, for_block->pop (), true);
      add_stmt (stmt);
    }
  release_tree_vector (for_block);
  return ret;
}

/* Helper function for OpenMP parsing, split clauses and call
   finish_omp_clauses on each of the set of clauses afterwards.  */

static void
omp_split_clauses (location_t loc, enum tree_code code,
		   omp_clause_mask mask, tree clauses, tree *cclauses)
{
  int i;

  c_omp_split_clauses (loc, code, mask, clauses, cclauses);
  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    if (cclauses[i])
      cclauses[i] = c_finish_omp_clauses (cclauses[i]);
}

/* OpenMP 4.0:
   #pragma omp simd simd-clause[optseq] new-line
     for-loop

   LOC is the location of the #pragma token.
*/

/* Clauses accepted on "#pragma omp simd".  */
#define OMP_SIMD_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SAFELEN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))

/* Parse "#pragma omp simd", possibly as the innermost part of a
   combined construct (MASK then carries the outer constructs' clauses
   and CCLAUSES receives the split clause sets).  */

static tree
c_parser_omp_simd (location_t loc, c_parser *parser,
		   char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree block, clauses, ret;

  strcat (p_name, " simd");
  mask |= OMP_SIMD_CLAUSE_MASK;
  /* "ordered" from an enclosing worksharing-loop does not apply to the
     simd part.  */
  mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED);

  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_SIMD, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
    }

  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_SIMD, clauses, cclauses);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);

  return ret;
}

/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop

   OpenMP 4.0:
   #pragma omp for simd for-simd-clause[optseq] new-line
     for-loop

   LOC is the location of the #pragma token.
*/

/* Clauses accepted on "#pragma omp for".  */
#define OMP_FOR_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp for", including the combined "for simd" form.
   CCLAUSES is non-NULL when this is itself part of a combined
   construct (e.g. "parallel for"), in which case "nowait" is not
   allowed.  */

static tree
c_parser_omp_for (location_t loc, c_parser *parser,
		  char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree block, clauses, ret;

  strcat (p_name, " for");
  mask |= OMP_FOR_CLAUSE_MASK;
  if (cclauses)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      if (strcmp (p, "simd") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_simd (loc, parser, p_name, mask, cclauses);
	  block = c_begin_compound_stmt (true);
	  ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL_TREE)
	    return ret;
	  ret = make_node (OMP_FOR);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = block;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  /* With -fopenmp-simd only, a plain "omp for" is ignored.  */
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_FOR, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
    }

  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_FOR, clauses, cclauses);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);

  return ret;
}

/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block

   LOC is the location of the #pragma token.  */

static tree
c_parser_omp_master (location_t loc, c_parser *parser)
{
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_master (loc, c_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block

   LOC is the location of the #pragma itself.  */

static tree
c_parser_omp_ordered (location_t loc, c_parser *parser)
{
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_ordered (loc, c_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:

   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block

    SECTIONS_LOC is the location of the #pragma omp sections.  */

static tree
c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  location_t loc;

  loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Avoid skipping until the end of the block.  */
      parser->error = false;
      return NULL_TREE;
    }

  stmt = push_stmt_list ();

  /* The first structured block may omit its "#pragma omp section".  */
  if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      substmt = c_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }

  while (1)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	break;
      if (c_parser_next_token_is (parser, CPP_EOF))
	break;

      loc = c_parser_peek_token (parser)->location;
      if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  /* Emit the missing-directive error only once per run of
	     erroneous blocks.  */
	  error_at (loc, "expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}

      substmt = c_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
			     "expected %<#pragma omp section%> or %<}%>");

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  SET_EXPR_LOCATION (stmt, sections_loc);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  return add_stmt (stmt);
}

/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope

   LOC is the location of the #pragma token.
*/

/* Clauses accepted on "#pragma omp sections".  */
#define OMP_SECTIONS_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp sections".  CCLAUSES is non-NULL for the combined
   "parallel sections" form, in which case "nowait" is disallowed and
   the clauses are split between the constructs.  */

static tree
c_parser_omp_sections (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree block, clauses, ret;

  strcat (p_name, " sections");
  mask |= OMP_SECTIONS_CLAUSE_MASK;
  if (cclauses)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);

  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_SECTIONS, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS];
    }

  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_sections_scope (loc, parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);

  return ret;
}

/* OpenMP 2.5:
   # pragma omp parallel parallel-clause[optseq] new-line
     structured-block
   # pragma omp parallel for parallel-for-clause[optseq] new-line
     structured-block
   # pragma omp parallel sections parallel-sections-clause[optseq] new-line
     structured-block

   OpenMP 4.0:
   # pragma omp parallel for simd parallel-for-simd-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma token.
*/

/* Clause set accepted on #pragma omp parallel (OpenMP 2.5/4.0).  */
#define OMP_PARALLEL_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))

/* Parse an OpenMP parallel construct, including the combined
   "parallel for [simd]" and "parallel sections" forms, and return the
   resulting statement (NULL_TREE on error).

   P_NAME/MASK/CCLAUSES are as for c_parser_omp_sections: P_NAME grows
   by " parallel" for diagnostics, MASK accumulates the allowed
   clauses, and a non-NULL CCLAUSES means we were reached from an
   enclosing combined construct, in which case only the combined
   "parallel for" spelling is valid here.  */

static tree
c_parser_omp_parallel (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree stmt, clauses, block;

  strcat (p_name, " parallel");
  mask |= OMP_PARALLEL_CLAUSE_MASK;

  if (c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      /* Combined "parallel for [simd]": split the clauses between the
	 parallel and the for/simd parts.  */
      tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
      if (cclauses == NULL)
	cclauses = cclauses_buf;

      c_parser_consume_token (parser);
      /* With -fopenmp-simd only, just parse the loop part.  */
      if (!flag_openmp)  /* flag_openmp_simd  */
	return c_parser_omp_for (loc, parser, p_name, mask, cclauses);
      block = c_begin_omp_parallel ();
      tree ret = c_parser_omp_for (loc, parser, p_name, mask, cclauses);
      /* Finish the parallel region even if the loop failed to parse,
	 so the pushed block is popped; only then bail out.  */
      stmt = c_finish_omp_parallel (loc, cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
				    block);
      if (ret == NULL_TREE)
	return ret;
      OMP_PARALLEL_COMBINED (stmt) = 1;
      return stmt;
    }
  else if (cclauses)
    {
      /* An enclosing combined construct required "for" here.  */
      error_at (loc, "expected %<for%> after %qs", p_name);
      c_parser_skip_to_pragma_eol (parser);
      return NULL_TREE;
    }
  else if (!flag_openmp)  /* flag_openmp_simd  */
    {
      /* Plain "parallel" is ignored under -fopenmp-simd.  */
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "sections") == 0)
	{
	  /* Combined "parallel sections".  */
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  block = c_begin_omp_parallel ();
	  c_parser_omp_sections (loc, parser, p_name, mask, cclauses);
	  stmt = c_finish_omp_parallel (loc,
					cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
					block);
	  OMP_PARALLEL_COMBINED (stmt) = 1;
	  return stmt;
	}
    }

  /* Plain "parallel" followed by a structured block.  */
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);

  block = c_begin_omp_parallel ();
  c_parser_statement (parser);
  stmt = c_finish_omp_parallel (loc, clauses, block);

  return stmt;
}

/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma.  */

/* Clause set accepted on #pragma omp single.  */
#define OMP_SINGLE_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse an OpenMP single construct: build an OMP_SINGLE node, attach
   the parsed clauses and structured block, and add it to the current
   statement list.  */

static tree
c_parser_omp_single (location_t loc, c_parser *parser)
{
  tree stmt = make_node (OMP_SINGLE);
  SET_EXPR_LOCATION (stmt, loc);
  TREE_TYPE (stmt) = void_type_node;

  OMP_SINGLE_CLAUSES (stmt)
    = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				"#pragma omp single");
  OMP_SINGLE_BODY (stmt) = c_parser_omp_structured_block (parser);

  return add_stmt (stmt);
}

/* OpenMP 3.0:
   # pragma omp task task-clause[optseq] new-line

   LOC is the location of the #pragma.
*/

/* Clause set accepted on #pragma omp task (OpenMP 3.0/4.0).  */
#define OMP_TASK_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND))

/* Parse an OpenMP task construct: clauses followed by the task body
   statement.  Returns the finished OMP_TASK statement.  */

static tree
c_parser_omp_task (location_t loc, c_parser *parser)
{
  tree clauses, block;

  clauses = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
				      "#pragma omp task");

  block = c_begin_omp_task ();
  c_parser_statement (parser);
  return c_finish_omp_task (loc, clauses, block);
}

/* OpenMP 3.0:
   # pragma omp taskwait new-line

   Stand-alone directive: consume the pragma and emit the taskwait.  */

static void
c_parser_omp_taskwait (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_taskwait (loc);
}

/* OpenMP 3.1:
   # pragma omp taskyield new-line

   Stand-alone directive: consume the pragma and emit the taskyield.  */

static void
c_parser_omp_taskyield (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_taskyield (loc);
}

/* OpenMP 4.0:
   # pragma omp taskgroup new-line

   Unlike taskwait/taskyield above, the pragma token itself has already
   been consumed by c_parser_omp_construct before dispatching here; only
   the rest of the pragma line is skipped.  The following structured
   block becomes the taskgroup body.  */

static tree
c_parser_omp_taskgroup (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_skip_to_pragma_eol (parser);
  return c_finish_omp_taskgroup (loc, c_parser_omp_structured_block (parser));
}

/* OpenMP 4.0:
   # pragma omp cancel cancel-clause[optseq] new-line

   LOC is the location of the #pragma.
*/

/* Clause set accepted on #pragma omp cancel (OpenMP 4.0): one of the
   construct-type selectors plus an optional if clause.  */
#define OMP_CANCEL_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))

/* Parse a stand-alone #pragma omp cancel directive and hand the
   clauses to c_finish_omp_cancel for checking and code generation.  */

static void
c_parser_omp_cancel (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);
  tree clauses = c_parser_omp_all_clauses (parser, OMP_CANCEL_CLAUSE_MASK,
					   "#pragma omp cancel");

  c_finish_omp_cancel (loc, clauses);
}

/* OpenMP 4.0:
   # pragma omp cancellation point cancelpt-clause[optseq] new-line

   LOC is the location of the #pragma.  */

/* Clause set accepted on #pragma omp cancellation point: the same
   construct-type selectors as cancel, but no if clause.  */
#define OMP_CANCELLATION_POINT_CLAUSE_MASK			\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP))

/* Parse a stand-alone #pragma omp cancellation point directive.  The
   word "point" is required after "cancellation"; without it the rest
   of the pragma line is discarded with an error.  */

static void
c_parser_omp_cancellation_point (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree clauses;
  bool point_seen = false;

  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "point") == 0)
	{
	  c_parser_consume_token (parser);
	  point_seen = true;
	}
    }
  if (!point_seen)
    {
      c_parser_error (parser, "expected %<point%>");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }

  clauses
    = c_parser_omp_all_clauses (parser, OMP_CANCELLATION_POINT_CLAUSE_MASK,
				"#pragma omp cancellation point");

  c_finish_omp_cancellation_point (loc, clauses);
}

/* OpenMP 4.0:
   #pragma omp distribute distribute-clause[optseq] new-line
     for-loop  */

/* Clause set accepted on #pragma omp distribute.  */
#define OMP_DISTRIBUTE_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))

static tree
c_parser_omp_distribute (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses) { tree clauses, block, ret; strcat (p_name, " distribute"); mask |= OMP_DISTRIBUTE_CLAUSE_MASK; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); bool simd = false; bool parallel = false; if (strcmp (p, "simd") == 0) simd = true; else parallel = strcmp (p, "parallel") == 0; if (parallel || simd) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); if (!flag_openmp) /* flag_openmp_simd */ { if (simd) return c_parser_omp_simd (loc, parser, p_name, mask, cclauses); else return c_parser_omp_parallel (loc, parser, p_name, mask, cclauses); } block = c_begin_compound_stmt (true); if (simd) ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses); else ret = c_parser_omp_parallel (loc, parser, p_name, mask, cclauses); block = c_end_compound_stmt (loc, block, true); if (ret == NULL) return ret; ret = make_node (OMP_DISTRIBUTE); TREE_TYPE (ret) = void_type_node; OMP_FOR_BODY (ret) = block; OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; SET_EXPR_LOCATION (ret, loc); add_stmt (ret); return ret; } } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_DISTRIBUTE, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; } block = c_begin_compound_stmt (true); ret = c_parser_omp_for_loop (loc, parser, OMP_DISTRIBUTE, clauses, NULL); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 4.0: # pragma omp teams teams-clause[optseq] new-line structured-block */ #define OMP_TEAMS_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << 
PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT)) static tree c_parser_omp_teams (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses) { tree clauses, block, ret; strcat (p_name, " teams"); mask |= OMP_TEAMS_CLAUSE_MASK; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "distribute") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); if (!flag_openmp) /* flag_openmp_simd */ return c_parser_omp_distribute (loc, parser, p_name, mask, cclauses); block = c_begin_compound_stmt (true); ret = c_parser_omp_distribute (loc, parser, p_name, mask, cclauses); block = c_end_compound_stmt (loc, block, true); if (ret == NULL) return ret; clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; ret = make_node (OMP_TEAMS); TREE_TYPE (ret) = void_type_node; OMP_TEAMS_CLAUSES (ret) = clauses; OMP_TEAMS_BODY (ret) = block; OMP_TEAMS_COMBINED (ret) = 1; return add_stmt (ret); } } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_TEAMS, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; } tree stmt = make_node (OMP_TEAMS); TREE_TYPE (stmt) = void_type_node; OMP_TEAMS_CLAUSES (stmt) = clauses; OMP_TEAMS_BODY (stmt) = c_parser_omp_structured_block (parser); return add_stmt (stmt); } /* OpenMP 4.0: # pragma omp target data target-data-clause[optseq] new-line structured-block */ #define OMP_TARGET_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << 
PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)) static tree c_parser_omp_target_data (location_t loc, c_parser *parser) { tree stmt = make_node (OMP_TARGET_DATA); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_DATA_CLAUSES (stmt) = c_parser_omp_all_clauses (parser, OMP_TARGET_DATA_CLAUSE_MASK, "#pragma omp target data"); keep_next_level (); tree block = c_begin_compound_stmt (true); add_stmt (c_parser_omp_structured_block (parser)); OMP_TARGET_DATA_BODY (stmt) = c_end_compound_stmt (loc, block, true); SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* OpenMP 4.0: # pragma omp target update target-update-clause[optseq] new-line */ #define OMP_TARGET_UPDATE_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FROM) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)) static bool c_parser_omp_target_update (location_t loc, c_parser *parser, enum pragma_context context) { if (context == pragma_stmt) { error_at (loc, "%<#pragma omp target update%> may only be " "used in compound statements"); c_parser_skip_to_pragma_eol (parser); return false; } tree clauses = c_parser_omp_all_clauses (parser, OMP_TARGET_UPDATE_CLAUSE_MASK, "#pragma omp target update"); if (find_omp_clause (clauses, OMP_CLAUSE_TO) == NULL_TREE && find_omp_clause (clauses, OMP_CLAUSE_FROM) == NULL_TREE) { error_at (loc, "%<#pragma omp target update%> must contain at least one " "%<from%> or %<to%> clauses"); return false; } tree stmt = make_node (OMP_TARGET_UPDATE); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_UPDATE_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); return false; } /* OpenMP 4.0: # pragma omp target target-clause[optseq] new-line structured-block */ #define OMP_TARGET_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \ | 
(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)) static bool c_parser_omp_target (c_parser *parser, enum pragma_context context) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); if (context != pragma_stmt && context != pragma_compound) { c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_to_pragma_eol (parser); return false; } if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "teams") == 0) { tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT]; char p_name[sizeof ("#pragma omp target teams distribute " "parallel for simd")]; c_parser_consume_token (parser); strcpy (p_name, "#pragma omp target"); if (!flag_openmp) /* flag_openmp_simd */ { tree stmt = c_parser_omp_teams (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses); return stmt != NULL_TREE; } keep_next_level (); tree block = c_begin_compound_stmt (true); tree ret = c_parser_omp_teams (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses); block = c_end_compound_stmt (loc, block, true); if (ret == NULL_TREE) return false; tree stmt = make_node (OMP_TARGET); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_CLAUSES (stmt) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; OMP_TARGET_BODY (stmt) = block; add_stmt (stmt); return true; } else if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return false; } else if (strcmp (p, "data") == 0) { c_parser_consume_token (parser); c_parser_omp_target_data (loc, parser); return true; } else if (strcmp (p, "update") == 0) { c_parser_consume_token (parser); return c_parser_omp_target_update (loc, parser, context); } } tree stmt = make_node (OMP_TARGET); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_CLAUSES (stmt) = c_parser_omp_all_clauses (parser, OMP_TARGET_CLAUSE_MASK, "#pragma omp target"); keep_next_level (); tree block = c_begin_compound_stmt (true); add_stmt (c_parser_omp_structured_block (parser)); 
OMP_TARGET_BODY (stmt) = c_end_compound_stmt (loc, block, true); SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); return true; } /* OpenMP 4.0: # pragma omp declare simd declare-simd-clauses[optseq] new-line */ #define OMP_DECLARE_SIMD_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_INBRANCH) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOTINBRANCH)) static void c_parser_omp_declare_simd (c_parser *parser, enum pragma_context context) { vec<c_token> clauses = vNULL; while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) { c_parser_skip_to_pragma_eol (parser); clauses.release (); return; } clauses.safe_push (*token); c_parser_consume_token (parser); } clauses.safe_push (*c_parser_peek_token (parser)); c_parser_skip_to_pragma_eol (parser); while (c_parser_next_token_is (parser, CPP_PRAGMA)) { if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_DECLARE_REDUCTION || c_parser_peek_2nd_token (parser)->type != CPP_NAME || strcmp (IDENTIFIER_POINTER (c_parser_peek_2nd_token (parser)->value), "simd") != 0) { c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by " "function declaration or definition or another " "%<#pragma omp declare simd%>"); clauses.release (); return; } c_parser_consume_pragma (parser); while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) { c_parser_skip_to_pragma_eol (parser); clauses.release (); return; } clauses.safe_push (*token); c_parser_consume_token (parser); } clauses.safe_push (*c_parser_peek_token (parser)); c_parser_skip_to_pragma_eol (parser); } /* Make sure nothing tries to read past the end of the tokens. 
*/ c_token eof_token; memset (&eof_token, 0, sizeof (eof_token)); eof_token.type = CPP_EOF; clauses.safe_push (eof_token); clauses.safe_push (eof_token); switch (context) { case pragma_external: if (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION) { int ext = disable_extension_diagnostics (); do c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION); c_parser_declaration_or_fndef (parser, true, true, true, false, true, NULL, clauses); restore_extension_diagnostics (ext); } else c_parser_declaration_or_fndef (parser, true, true, true, false, true, NULL, clauses); break; case pragma_struct: case pragma_param: c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by " "function declaration or definition"); break; case pragma_compound: case pragma_stmt: if (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION) { int ext = disable_extension_diagnostics (); do c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION); if (c_parser_next_tokens_start_declaration (parser)) { c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, clauses); restore_extension_diagnostics (ext); break; } restore_extension_diagnostics (ext); } else if (c_parser_next_tokens_start_declaration (parser)) { c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, clauses); break; } c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by " "function declaration or definition"); break; default: gcc_unreachable (); } clauses.release (); } /* Finalize #pragma omp declare simd clauses after FNDECL has been parsed, and put that into "omp declare simd" attribute. 
*/ static void c_finish_omp_declare_simd (c_parser *parser, tree fndecl, tree parms, vec<c_token> clauses) { if (flag_cilkplus && clauses.exists () && !vec_safe_is_empty (parser->cilk_simd_fn_tokens)) { error ("%<#pragma omp declare simd%> cannot be used in the same " "function marked as a Cilk Plus SIMD-enabled function"); vec_free (parser->cilk_simd_fn_tokens); return; } /* Normally first token is CPP_NAME "simd". CPP_EOF there indicates error has been reported and CPP_PRAGMA that c_finish_omp_declare_simd has already processed the tokens. */ if (clauses.exists () && clauses[0].type == CPP_EOF) return; if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL) { error ("%<#pragma omp declare simd%> not immediately followed by " "a function declaration or definition"); clauses[0].type = CPP_EOF; return; } if (clauses.exists () && clauses[0].type != CPP_NAME) { error_at (DECL_SOURCE_LOCATION (fndecl), "%<#pragma omp declare simd%> not immediately followed by " "a single function declaration or definition"); clauses[0].type = CPP_EOF; return; } if (parms == NULL_TREE) parms = DECL_ARGUMENTS (fndecl); unsigned int tokens_avail = parser->tokens_avail; gcc_assert (parser->tokens == &parser->tokens_buf[0]); bool is_cilkplus_cilk_simd_fn = false; if (flag_cilkplus && !vec_safe_is_empty (parser->cilk_simd_fn_tokens)) { parser->tokens = parser->cilk_simd_fn_tokens->address (); parser->tokens_avail = vec_safe_length (parser->cilk_simd_fn_tokens); is_cilkplus_cilk_simd_fn = true; } else { parser->tokens = clauses.address (); parser->tokens_avail = clauses.length (); } /* c_parser_omp_declare_simd pushed 2 extra CPP_EOF tokens at the end. 
*/ while (parser->tokens_avail > 3) { c_token *token = c_parser_peek_token (parser); if (!is_cilkplus_cilk_simd_fn) gcc_assert (token->type == CPP_NAME && strcmp (IDENTIFIER_POINTER (token->value), "simd") == 0); else gcc_assert (token->type == CPP_NAME && is_cilkplus_vector_p (token->value)); c_parser_consume_token (parser); parser->in_pragma = true; tree c = NULL_TREE; if (is_cilkplus_cilk_simd_fn) c = c_parser_omp_all_clauses (parser, CILK_SIMD_FN_CLAUSE_MASK, "SIMD-enabled functions attribute"); else c = c_parser_omp_all_clauses (parser, OMP_DECLARE_SIMD_CLAUSE_MASK, "#pragma omp declare simd"); c = c_omp_declare_simd_clauses_to_numbers (parms, c); if (c != NULL_TREE) c = tree_cons (NULL_TREE, c, NULL_TREE); if (is_cilkplus_cilk_simd_fn) { tree k = build_tree_list (get_identifier ("cilk simd function"), NULL_TREE); TREE_CHAIN (k) = DECL_ATTRIBUTES (fndecl); DECL_ATTRIBUTES (fndecl) = k; } c = build_tree_list (get_identifier ("omp declare simd"), c); TREE_CHAIN (c) = DECL_ATTRIBUTES (fndecl); DECL_ATTRIBUTES (fndecl) = c; } parser->tokens = &parser->tokens_buf[0]; parser->tokens_avail = tokens_avail; if (clauses.exists ()) clauses[0].type = CPP_PRAGMA; if (!vec_safe_is_empty (parser->cilk_simd_fn_tokens)) vec_free (parser->cilk_simd_fn_tokens); } /* OpenMP 4.0: # pragma omp declare target new-line declarations and definitions # pragma omp end declare target new-line */ static void c_parser_omp_declare_target (c_parser *parser) { c_parser_skip_to_pragma_eol (parser); current_omp_declare_target_attribute++; } static void c_parser_omp_end_declare_target (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_NAME) && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "declare") == 0) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME) && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "target") == 0) 
c_parser_consume_token (parser); else { c_parser_error (parser, "expected %<target%>"); c_parser_skip_to_pragma_eol (parser); return; } } else { c_parser_error (parser, "expected %<declare%>"); c_parser_skip_to_pragma_eol (parser); return; } c_parser_skip_to_pragma_eol (parser); if (!current_omp_declare_target_attribute) error_at (loc, "%<#pragma omp end declare target%> without corresponding " "%<#pragma omp declare target%>"); else current_omp_declare_target_attribute--; } /* OpenMP 4.0 #pragma omp declare reduction (reduction-id : typename-list : expression) \ initializer-clause[opt] new-line initializer-clause: initializer (omp_priv = initializer) initializer (function-name (argument-list)) */ static void c_parser_omp_declare_reduction (c_parser *parser, enum pragma_context context) { unsigned int tokens_avail = 0, i; vec<tree> types = vNULL; vec<c_token> clauses = vNULL; enum tree_code reduc_code = ERROR_MARK; tree reduc_id = NULL_TREE; tree type; location_t rloc = c_parser_peek_token (parser)->location; if (context == pragma_struct || context == pragma_param) { error ("%<#pragma omp declare reduction%> not at file or block scope"); goto fail; } if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto fail; switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: reduc_code = PLUS_EXPR; break; case CPP_MULT: reduc_code = MULT_EXPR; break; case CPP_MINUS: reduc_code = MINUS_EXPR; break; case CPP_AND: reduc_code = BIT_AND_EXPR; break; case CPP_XOR: reduc_code = BIT_XOR_EXPR; break; case CPP_OR: reduc_code = BIT_IOR_EXPR; break; case CPP_AND_AND: reduc_code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: reduc_code = TRUTH_ORIF_EXPR; break; case CPP_NAME: const char *p; p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "min") == 0) { reduc_code = MIN_EXPR; break; } if (strcmp (p, "max") == 0) { reduc_code = MAX_EXPR; break; } reduc_id = c_parser_peek_token (parser)->value; break; default: c_parser_error (parser, "expected 
%<+%>, %<*%>, %<-%>, %<&%>, " "%<^%>, %<|%>, %<&&%>, %<||%>, %<min%> or identifier"); goto fail; } tree orig_reduc_id, reduc_decl; orig_reduc_id = reduc_id; reduc_id = c_omp_reduction_id (reduc_code, reduc_id); reduc_decl = c_omp_reduction_decl (reduc_id); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) goto fail; while (true) { location_t loc = c_parser_peek_token (parser)->location; struct c_type_name *ctype = c_parser_type_name (parser); if (ctype != NULL) { type = groktypename (ctype, NULL, NULL); if (type == error_mark_node) ; else if ((INTEGRAL_TYPE_P (type) || TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == COMPLEX_TYPE) && orig_reduc_id == NULL_TREE) error_at (loc, "predeclared arithmetic type in " "%<#pragma omp declare reduction%>"); else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "function or array type in " "%<#pragma omp declare reduction%>"); else if (TYPE_QUALS_NO_ADDR_SPACE (type)) error_at (loc, "const, volatile or restrict qualified type in " "%<#pragma omp declare reduction%>"); else { tree t; for (t = DECL_INITIAL (reduc_decl); t; t = TREE_CHAIN (t)) if (comptypes (TREE_PURPOSE (t), type)) { error_at (loc, "redeclaration of %qs " "%<#pragma omp declare reduction%> for " "type %qT", IDENTIFIER_POINTER (reduc_id) + sizeof ("omp declare reduction ") - 1, type); location_t ploc = DECL_SOURCE_LOCATION (TREE_VEC_ELT (TREE_VALUE (t), 0)); error_at (ploc, "previous %<#pragma omp declare " "reduction%>"); break; } if (t == NULL_TREE) types.safe_push (type); } if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } else break; } if (!c_parser_require (parser, CPP_COLON, "expected %<:%>") || types.is_empty ()) { fail: clauses.release (); types.release (); while (true) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL) break; c_parser_consume_token (parser); } 
c_parser_skip_to_pragma_eol (parser); return; } if (types.length () > 1) { while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) goto fail; clauses.safe_push (*token); c_parser_consume_token (parser); } clauses.safe_push (*c_parser_peek_token (parser)); c_parser_skip_to_pragma_eol (parser); /* Make sure nothing tries to read past the end of the tokens. */ c_token eof_token; memset (&eof_token, 0, sizeof (eof_token)); eof_token.type = CPP_EOF; clauses.safe_push (eof_token); clauses.safe_push (eof_token); } int errs = errorcount; FOR_EACH_VEC_ELT (types, i, type) { tokens_avail = parser->tokens_avail; gcc_assert (parser->tokens == &parser->tokens_buf[0]); if (!clauses.is_empty ()) { parser->tokens = clauses.address (); parser->tokens_avail = clauses.length (); parser->in_pragma = true; } bool nested = current_function_decl != NULL_TREE; if (nested) c_push_function_context (); tree fndecl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, reduc_id, default_function_type); current_function_decl = fndecl; allocate_struct_function (fndecl, true); push_scope (); tree stmt = push_stmt_list (); /* Intentionally BUILTINS_LOCATION, so that -Wshadow doesn't warn about these. 
*/ tree omp_out = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_out"), type); DECL_ARTIFICIAL (omp_out) = 1; DECL_CONTEXT (omp_out) = fndecl; pushdecl (omp_out); tree omp_in = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_in"), type); DECL_ARTIFICIAL (omp_in) = 1; DECL_CONTEXT (omp_in) = fndecl; pushdecl (omp_in); struct c_expr combiner = c_parser_expression (parser); struct c_expr initializer; tree omp_priv = NULL_TREE, omp_orig = NULL_TREE; bool bad = false; initializer.value = error_mark_node; if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) bad = true; else if (c_parser_next_token_is (parser, CPP_NAME) && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "initializer") == 0) { c_parser_consume_token (parser); pop_scope (); push_scope (); omp_priv = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_priv"), type); DECL_ARTIFICIAL (omp_priv) = 1; DECL_INITIAL (omp_priv) = error_mark_node; DECL_CONTEXT (omp_priv) = fndecl; pushdecl (omp_priv); omp_orig = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_orig"), type); DECL_ARTIFICIAL (omp_orig) = 1; DECL_CONTEXT (omp_orig) = fndecl; pushdecl (omp_orig); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) bad = true; else if (!c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected %<omp_priv%> or " "function-name"); bad = true; } else if (strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "omp_priv") != 0) { if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN || c_parser_peek_token (parser)->id_kind != C_ID_ID) { c_parser_error (parser, "expected function-name %<(%>"); bad = true; } else initializer = c_parser_postfix_expression (parser); if (initializer.value && TREE_CODE (initializer.value) == CALL_EXPR) { int j; tree c = initializer.value; for (j = 0; j < call_expr_nargs (c); j++) if (TREE_CODE (CALL_EXPR_ARG (c, j)) == ADDR_EXPR && TREE_OPERAND (CALL_EXPR_ARG (c, 
j), 0) == omp_priv) break; if (j == call_expr_nargs (c)) error ("one of the initializer call arguments should be " "%<&omp_priv%>"); } } else { c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_EQ, "expected %<=%>")) bad = true; else { tree st = push_stmt_list (); start_init (omp_priv, NULL_TREE, 0); location_t loc = c_parser_peek_token (parser)->location; struct c_expr init = c_parser_initializer (parser); finish_init (); finish_decl (omp_priv, loc, init.value, init.original_type, NULL_TREE); pop_stmt_list (st); } } if (!bad && !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) bad = true; } if (!bad) { c_parser_skip_to_pragma_eol (parser); tree t = tree_cons (type, make_tree_vec (omp_priv ? 6 : 3), DECL_INITIAL (reduc_decl)); DECL_INITIAL (reduc_decl) = t; DECL_SOURCE_LOCATION (omp_out) = rloc; TREE_VEC_ELT (TREE_VALUE (t), 0) = omp_out; TREE_VEC_ELT (TREE_VALUE (t), 1) = omp_in; TREE_VEC_ELT (TREE_VALUE (t), 2) = combiner.value; walk_tree (&combiner.value, c_check_omp_declare_reduction_r, &TREE_VEC_ELT (TREE_VALUE (t), 0), NULL); if (omp_priv) { DECL_SOURCE_LOCATION (omp_priv) = rloc; TREE_VEC_ELT (TREE_VALUE (t), 3) = omp_priv; TREE_VEC_ELT (TREE_VALUE (t), 4) = omp_orig; TREE_VEC_ELT (TREE_VALUE (t), 5) = initializer.value; walk_tree (&initializer.value, c_check_omp_declare_reduction_r, &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL); walk_tree (&DECL_INITIAL (omp_priv), c_check_omp_declare_reduction_r, &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL); } } pop_stmt_list (stmt); pop_scope (); if (cfun->language != NULL) { ggc_free (cfun->language); cfun->language = NULL; } set_cfun (NULL); current_function_decl = NULL_TREE; if (nested) c_pop_function_context (); if (!clauses.is_empty ()) { parser->tokens = &parser->tokens_buf[0]; parser->tokens_avail = tokens_avail; } if (bad) goto fail; if (errs != errorcount) break; } clauses.release (); types.release (); } /* OpenMP 4.0 #pragma omp declare simd declare-simd-clauses[optseq] new-line #pragma omp 
   declare reduction (reduction-id : typename-list : expression) \
      initializer-clause[opt] new-line
   #pragma omp declare target new-line  */

/* Dispatch the three forms of #pragma omp declare: "simd",
   "reduction" and "target".  CONTEXT says where the pragma appeared
   (file scope, statement, struct, parameter list, ...) and is passed
   on so each sub-parser can reject invalid placements.  */

static void
c_parser_omp_declare (c_parser *parser, enum pragma_context context)
{
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "simd") == 0)
	{
	  /* c_parser_consume_token (parser); done in
	     c_parser_omp_declare_simd.  */
	  c_parser_omp_declare_simd (parser, context);
	  return;
	}
      if (strcmp (p, "reduction") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_declare_reduction (parser, context);
	  return;
	}
      /* "declare target" is meaningless under plain -fopenmp-simd;
	 silently discard the pragma line.  */
      if (!flag_openmp)  /* flag_openmp_simd  */
	{
	  c_parser_skip_to_pragma_eol (parser, false);
	  return;
	}
      if (strcmp (p, "target") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_declare_target (parser);
	  return;
	}
    }

  c_parser_error (parser, "expected %<simd%> or %<reduction%> "
			  "or %<target%>");
  c_parser_skip_to_pragma_eol (parser);
}

/* Main entry point to parsing most OpenMP pragmas.
*/ static void c_parser_omp_construct (c_parser *parser) { enum pragma_kind p_kind; location_t loc; tree stmt; char p_name[sizeof "#pragma omp teams distribute parallel for simd"]; omp_clause_mask mask (0); loc = c_parser_peek_token (parser)->location; p_kind = c_parser_peek_token (parser)->pragma_kind; c_parser_consume_pragma (parser); switch (p_kind) { case PRAGMA_OACC_CACHE: strcpy (p_name, "#pragma acc"); stmt = c_parser_oacc_cache (loc, parser); break; case PRAGMA_OACC_DATA: stmt = c_parser_oacc_data (loc, parser); break; case PRAGMA_OACC_KERNELS: strcpy (p_name, "#pragma acc"); stmt = c_parser_oacc_kernels (loc, parser, p_name); break; case PRAGMA_OACC_LOOP: strcpy (p_name, "#pragma acc"); stmt = c_parser_oacc_loop (loc, parser, p_name); break; case PRAGMA_OACC_PARALLEL: strcpy (p_name, "#pragma acc"); stmt = c_parser_oacc_parallel (loc, parser, p_name); break; case PRAGMA_OACC_WAIT: strcpy (p_name, "#pragma wait"); stmt = c_parser_oacc_wait (loc, parser, p_name); break; case PRAGMA_OMP_ATOMIC: c_parser_omp_atomic (loc, parser); return; case PRAGMA_OMP_CRITICAL: stmt = c_parser_omp_critical (loc, parser); break; case PRAGMA_OMP_DISTRIBUTE: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_distribute (loc, parser, p_name, mask, NULL); break; case PRAGMA_OMP_FOR: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_for (loc, parser, p_name, mask, NULL); break; case PRAGMA_OMP_MASTER: stmt = c_parser_omp_master (loc, parser); break; case PRAGMA_OMP_ORDERED: stmt = c_parser_omp_ordered (loc, parser); break; case PRAGMA_OMP_PARALLEL: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_parallel (loc, parser, p_name, mask, NULL); break; case PRAGMA_OMP_SECTIONS: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_sections (loc, parser, p_name, mask, NULL); break; case PRAGMA_OMP_SIMD: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_simd (loc, parser, p_name, mask, NULL); break; case PRAGMA_OMP_SINGLE: stmt = c_parser_omp_single (loc, parser); break; case 
PRAGMA_OMP_TASK: stmt = c_parser_omp_task (loc, parser); break; case PRAGMA_OMP_TASKGROUP: stmt = c_parser_omp_taskgroup (parser); break; case PRAGMA_OMP_TEAMS: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_teams (loc, parser, p_name, mask, NULL); break; default: gcc_unreachable (); } if (stmt) gcc_assert (EXPR_LOCATION (stmt) != UNKNOWN_LOCATION); } /* OpenMP 2.5: # pragma omp threadprivate (variable-list) */ static void c_parser_omp_threadprivate (c_parser *parser) { tree vars, t; location_t loc; c_parser_consume_pragma (parser); loc = c_parser_peek_token (parser)->location; vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); /* Mark every variable in VARS to be assigned thread local storage. */ for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); /* FIXME diagnostics: Ideally we should keep individual locations for all the variables in the var list to make the following errors more precise. Perhaps c_parser_omp_var_list_parens() should construct a list of locations to go along with the var list. */ /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. */ if (TREE_CODE (v) != VAR_DECL) error_at (loc, "%qD is not a variable", v); else if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v)) error_at (loc, "%qE declared %<threadprivate%> after first use", v); else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v)) error_at (loc, "automatic variable %qE cannot be %<threadprivate%>", v); else if (TREE_TYPE (v) == error_mark_node) ; else if (! COMPLETE_TYPE_P (TREE_TYPE (v))) error_at (loc, "%<threadprivate%> %qE has incomplete type", v); else { if (! DECL_THREAD_LOCAL_P (v)) { set_decl_tls_model (v, decl_default_tls_model (v)); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. 
*/ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } C_DECL_THREADPRIVATE_P (v) = 1; } } c_parser_skip_to_pragma_eol (parser); } /* Cilk Plus <#pragma simd> parsing routines. */ /* Helper function for c_parser_pragma. Perform some sanity checking for <#pragma simd> constructs. Returns FALSE if there was a problem. */ static bool c_parser_cilk_verify_simd (c_parser *parser, enum pragma_context context) { if (!flag_cilkplus) { warning (0, "pragma simd ignored because -fcilkplus is not enabled"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; } if (context == pragma_external) { c_parser_error (parser,"pragma simd must be inside a function"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; } return true; } /* Cilk Plus: This function is shared by SIMD-enabled functions and #pragma simd. If IS_SIMD_FN is true then it is parsing a SIMD-enabled function and CLAUSES is unused. The main purpose of this function is to parse a vectorlength attribute or clause and check for parse errors. When IS_SIMD_FN is true then the function is merely caching the tokens in PARSER->CILK_SIMD_FN_TOKENS. If errors are found then the token cache is cleared since there is no reason to continue. Syntax: vectorlength ( constant-expression ) */ static tree c_parser_cilk_clause_vectorlength (c_parser *parser, tree clauses, bool is_simd_fn) { if (is_simd_fn) check_no_duplicate_clause (clauses, OMP_CLAUSE_SIMDLEN, "vectorlength"); else /* The vectorlength clause behaves exactly like OpenMP's safelen clause. Represent it in OpenMP terms. */ check_no_duplicate_clause (clauses, OMP_CLAUSE_SAFELEN, "vectorlength"); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return clauses; location_t loc = c_parser_peek_token (parser)->location; tree expr = c_parser_expr_no_commas (parser, NULL).value; expr = c_fully_fold (expr, false, NULL); /* If expr is an error_mark_node then the above function would have emitted an error. No reason to do it twice. 
*/ if (expr == error_mark_node) ; else if (!TREE_TYPE (expr) || !TREE_CONSTANT (expr) || !INTEGRAL_TYPE_P (TREE_TYPE (expr))) error_at (loc, "vectorlength must be an integer constant"); else if (wi::exact_log2 (expr) == -1) error_at (loc, "vectorlength must be a power of 2"); else { if (is_simd_fn) { tree u = build_omp_clause (loc, OMP_CLAUSE_SIMDLEN); OMP_CLAUSE_SIMDLEN_EXPR (u) = expr; OMP_CLAUSE_CHAIN (u) = clauses; clauses = u; } else { tree u = build_omp_clause (loc, OMP_CLAUSE_SAFELEN); OMP_CLAUSE_SAFELEN_EXPR (u) = expr; OMP_CLAUSE_CHAIN (u) = clauses; clauses = u; } } c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return clauses; } /* Cilk Plus: linear ( simd-linear-variable-list ) simd-linear-variable-list: simd-linear-variable simd-linear-variable-list , simd-linear-variable simd-linear-variable: id-expression id-expression : simd-linear-step simd-linear-step: conditional-expression */ static tree c_parser_cilk_clause_linear (c_parser *parser, tree clauses) { if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return clauses; location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is_not (parser, CPP_NAME) || c_parser_peek_token (parser)->id_kind != C_ID_ID) c_parser_error (parser, "expected identifier"); while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { tree var = lookup_name (c_parser_peek_token (parser)->value); if (var == NULL) { undeclared_variable (c_parser_peek_token (parser)->location, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else if (var == error_mark_node) c_parser_consume_token (parser); else { tree step = integer_one_node; /* Parse the linear step if present. 
*/ if (c_parser_peek_2nd_token (parser)->type == CPP_COLON) { c_parser_consume_token (parser); c_parser_consume_token (parser); tree expr = c_parser_expr_no_commas (parser, NULL).value; expr = c_fully_fold (expr, false, NULL); if (TREE_TYPE (expr) && INTEGRAL_TYPE_P (TREE_TYPE (expr)) && (TREE_CONSTANT (expr) || DECL_P (expr))) step = expr; else c_parser_error (parser, "step size must be an integer constant " "expression or an integer variable"); } else c_parser_consume_token (parser); /* Use OMP_CLAUSE_LINEAR, which has the same semantics. */ tree u = build_omp_clause (loc, OMP_CLAUSE_LINEAR); OMP_CLAUSE_DECL (u) = var; OMP_CLAUSE_LINEAR_STEP (u) = step; OMP_CLAUSE_CHAIN (u) = clauses; clauses = u; } if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return clauses; } /* Returns the name of the next clause. If the clause is not recognized SIMD_OMP_CLAUSE_NONE is returned and the next token is not consumed. Otherwise, the appropriate pragma_simd_clause is returned and the token is consumed. */ static pragma_omp_clause c_parser_cilk_clause_name (c_parser *parser) { pragma_omp_clause result; c_token *token = c_parser_peek_token (parser); if (!token->value || token->type != CPP_NAME) return PRAGMA_CILK_CLAUSE_NONE; const char *p = IDENTIFIER_POINTER (token->value); if (!strcmp (p, "vectorlength")) result = PRAGMA_CILK_CLAUSE_VECTORLENGTH; else if (!strcmp (p, "linear")) result = PRAGMA_CILK_CLAUSE_LINEAR; else if (!strcmp (p, "private")) result = PRAGMA_CILK_CLAUSE_PRIVATE; else if (!strcmp (p, "firstprivate")) result = PRAGMA_CILK_CLAUSE_FIRSTPRIVATE; else if (!strcmp (p, "lastprivate")) result = PRAGMA_CILK_CLAUSE_LASTPRIVATE; else if (!strcmp (p, "reduction")) result = PRAGMA_CILK_CLAUSE_REDUCTION; else return PRAGMA_CILK_CLAUSE_NONE; c_parser_consume_token (parser); return result; } /* Parse all #<pragma simd> clauses. Return the list of clauses found. 
*/ static tree c_parser_cilk_all_clauses (c_parser *parser) { tree clauses = NULL; while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { pragma_omp_clause c_kind; c_kind = c_parser_cilk_clause_name (parser); switch (c_kind) { case PRAGMA_CILK_CLAUSE_VECTORLENGTH: clauses = c_parser_cilk_clause_vectorlength (parser, clauses, false); break; case PRAGMA_CILK_CLAUSE_LINEAR: clauses = c_parser_cilk_clause_linear (parser, clauses); break; case PRAGMA_CILK_CLAUSE_PRIVATE: /* Use the OpenMP counterpart. */ clauses = c_parser_omp_clause_private (parser, clauses); break; case PRAGMA_CILK_CLAUSE_FIRSTPRIVATE: /* Use the OpenMP counterpart. */ clauses = c_parser_omp_clause_firstprivate (parser, clauses); break; case PRAGMA_CILK_CLAUSE_LASTPRIVATE: /* Use the OpenMP counterpart. */ clauses = c_parser_omp_clause_lastprivate (parser, clauses); break; case PRAGMA_CILK_CLAUSE_REDUCTION: /* Use the OpenMP counterpart. */ clauses = c_parser_omp_clause_reduction (parser, clauses); break; default: c_parser_error (parser, "expected %<#pragma simd%> clause"); goto saw_error; } } saw_error: c_parser_skip_to_pragma_eol (parser); return c_finish_cilk_clauses (clauses); } /* This function helps parse the grainsize pragma for a _Cilk_for statement. Here is the correct syntax of this pragma: #pragma cilk grainsize = <EXP> */ static void c_parser_cilk_grainsize (c_parser *parser) { extern tree convert_to_integer (tree, tree); /* consume the 'grainsize' keyword. 
*/ c_parser_consume_pragma (parser); if (c_parser_require (parser, CPP_EQ, "expected %<=%>") != 0) { struct c_expr g_expr = c_parser_binary_expression (parser, NULL, NULL); if (g_expr.value == error_mark_node) { c_parser_skip_to_pragma_eol (parser); return; } tree grain = convert_to_integer (long_integer_type_node, c_fully_fold (g_expr.value, false, NULL)); c_parser_skip_to_pragma_eol (parser); c_token *token = c_parser_peek_token (parser); if (token && token->type == CPP_KEYWORD && token->keyword == RID_CILK_FOR) { if (grain == NULL_TREE || grain == error_mark_node) grain = integer_zero_node; c_parser_cilk_for (parser, grain); } else warning (0, "%<#pragma cilk grainsize%> is not followed by " "%<_Cilk_for%>"); } else c_parser_skip_to_pragma_eol (parser); } /* Main entry point for parsing Cilk Plus <#pragma simd> for loops. */ static void c_parser_cilk_simd (c_parser *parser) { tree clauses = c_parser_cilk_all_clauses (parser); tree block = c_begin_compound_stmt (true); location_t loc = c_parser_peek_token (parser)->location; c_parser_omp_for_loop (loc, parser, CILK_SIMD, clauses, NULL); block = c_end_compound_stmt (loc, block, true); add_stmt (block); } /* Create an artificial decl with TYPE and emit initialization of it with INIT. */ static tree c_get_temp_regvar (tree type, tree init) { location_t loc = EXPR_LOCATION (init); tree decl = build_decl (loc, VAR_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; pushdecl (decl); tree t = build2 (INIT_EXPR, type, decl, init); add_stmt (t); return decl; } /* Main entry point for parsing Cilk Plus _Cilk_for loops. GRAIN is the grain value passed in through pragma or 0. 
*/ static void c_parser_cilk_for (c_parser *parser, tree grain) { tree clauses = build_omp_clause (EXPR_LOCATION (grain), OMP_CLAUSE_SCHEDULE); OMP_CLAUSE_SCHEDULE_KIND (clauses) = OMP_CLAUSE_SCHEDULE_CILKFOR; OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clauses) = grain; clauses = c_finish_omp_clauses (clauses); tree block = c_begin_compound_stmt (true); tree sb = push_stmt_list (); location_t loc = c_parser_peek_token (parser)->location; tree omp_for = c_parser_omp_for_loop (loc, parser, CILK_FOR, clauses, NULL); sb = pop_stmt_list (sb); if (omp_for) { tree omp_par = make_node (OMP_PARALLEL); TREE_TYPE (omp_par) = void_type_node; OMP_PARALLEL_CLAUSES (omp_par) = NULL_TREE; tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = sb; OMP_PARALLEL_BODY (omp_par) = bind; if (OMP_FOR_PRE_BODY (omp_for)) { add_stmt (OMP_FOR_PRE_BODY (omp_for)); OMP_FOR_PRE_BODY (omp_for) = NULL_TREE; } tree init = TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0); tree decl = TREE_OPERAND (init, 0); tree cond = TREE_VEC_ELT (OMP_FOR_COND (omp_for), 0); tree incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0); tree t = TREE_OPERAND (cond, 1), c, clauses = NULL_TREE; if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (cond, 1) = c_get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (cond, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (TREE_CODE (incr) == MODIFY_EXPR) { t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = c_get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } } t = TREE_OPERAND (init, 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (init, 1) = c_get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause 
(input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (init, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } c = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; c = build_omp_clause (input_location, OMP_CLAUSE__CILK_FOR_COUNT_); OMP_CLAUSE_OPERAND (c, 0) = cilk_for_number_of_iterations (omp_for); OMP_CLAUSE_CHAIN (c) = clauses; OMP_PARALLEL_CLAUSES (omp_par) = c_finish_omp_clauses (c); add_stmt (omp_par); } block = c_end_compound_stmt (loc, block, true); add_stmt (block); } /* Parse a transaction attribute (GCC Extension). transaction-attribute: attributes [ [ any-word ] ] The transactional memory language description is written for C++, and uses the C++0x attribute syntax. For compatibility, allow the bracket style for transactions in C as well. */ static tree c_parser_transaction_attributes (c_parser *parser) { tree attr_name, attr = NULL; if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) return c_parser_attributes (parser); if (!c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) return NULL_TREE; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_SQUARE, "expected %<[%>")) goto error1; attr_name = c_parser_attribute_any_word (parser); if (attr_name) { c_parser_consume_token (parser); attr = build_tree_list (attr_name, NULL_TREE); } else c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); error1: c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); return attr; } /* Parse a __transaction_atomic or __transaction_relaxed statement (GCC Extension). transaction-statement: __transaction_atomic transaction-attribute[opt] compound-statement __transaction_relaxed compound-statement Note that the only valid attribute is: "outer". 
*/

/* Parse a __transaction_atomic or __transaction_relaxed STATEMENT.
   KEYWORD selects which of the two forms we are parsing.  Tracks
   transaction nesting via PARSER->in_transaction (restored on exit),
   parses optional attributes for the atomic form, and wraps the body
   with c_finish_transaction when -fgnu-tm is enabled; otherwise emits
   an error but still returns the parsed compound statement.  */

static tree
c_parser_transaction (c_parser *parser, enum rid keyword)
{
  unsigned int old_in = parser->in_transaction;
  unsigned int this_in = 1, new_in;
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt, attrs;

  gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
	       || keyword == RID_TRANSACTION_RELAXED)
	      && c_parser_next_token_is_keyword (parser, keyword));
  c_parser_consume_token (parser);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Only the atomic form accepts attributes ("outer").  */
      attrs = c_parser_transaction_attributes (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }

  /* Keep track if we're in the lexical scope of an outer transaction.  */
  new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);

  parser->in_transaction = new_in;
  stmt = c_parser_compound_statement (parser);
  parser->in_transaction = old_in;

  if (flag_tm)
    stmt = c_finish_transaction (loc, stmt, this_in);
  else
    error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
	"%<__transaction_atomic%> without transactional memory support enabled"
	: "%<__transaction_relaxed %> "
	"without transactional memory support enabled"));

  return stmt;
}

/* Parse a __transaction_atomic or __transaction_relaxed expression (GCC Extension). 
 transaction-expression:
     __transaction_atomic ( expression )
     __transaction_relaxed ( expression )  */

/* Parse the EXPRESSION form of __transaction_atomic /
   __transaction_relaxed.  Builds a TRANSACTION_EXPR around the
   parenthesized expression; on a parse error returns error_mark_node
   in the c_expr.  PARSER->in_transaction is set for the duration of
   the inner expression and restored afterwards.  Without -fgnu-tm an
   error is emitted after parsing.  */

static struct c_expr
c_parser_transaction_expression (c_parser *parser, enum rid keyword)
{
  struct c_expr ret;
  unsigned int old_in = parser->in_transaction;
  unsigned int this_in = 1;
  location_t loc = c_parser_peek_token (parser)->location;
  tree attrs;

  gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
	       || keyword == RID_TRANSACTION_RELAXED)
	      && c_parser_next_token_is_keyword (parser, keyword));
  c_parser_consume_token (parser);

  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* NOTE: attributes are parsed but "outer" is not allowed here
	 (mask 0 passed to parse_tm_stmt_attr).  */
      attrs = c_parser_transaction_attributes (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, 0);
    }

  parser->in_transaction = this_in;
  if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    {
      tree expr = c_parser_expression (parser).value;
      ret.original_type = TREE_TYPE (expr);
      ret.value = build1 (TRANSACTION_EXPR, ret.original_type, expr);
      if (this_in & TM_STMT_ATTR_RELAXED)
	TRANSACTION_EXPR_RELAXED (ret.value) = 1;
      SET_EXPR_LOCATION (ret.value, loc);
      ret.original_code = TRANSACTION_EXPR;
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  goto error;
	}
    }
  else
    {
     error:
      ret.value = error_mark_node;
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
    }
  parser->in_transaction = old_in;

  if (!flag_tm)
    error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
	"%<__transaction_atomic%> without transactional memory support enabled"
	: "%<__transaction_relaxed %> "
	"without transactional memory support enabled"));

  return ret;
}

/* Parse a __transaction_cancel statement (GCC Extension).

   transaction-cancel-statement:
     __transaction_cancel transaction-attribute[opt] ;

   Note that the only valid attribute is "outer". 
*/ static tree c_parser_transaction_cancel (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree attrs; bool is_outer = false; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TRANSACTION_CANCEL)); c_parser_consume_token (parser); attrs = c_parser_transaction_attributes (parser); if (attrs) is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0); if (!flag_tm) { error_at (loc, "%<__transaction_cancel%> without " "transactional memory support enabled"); goto ret_error; } else if (parser->in_transaction & TM_STMT_ATTR_RELAXED) { error_at (loc, "%<__transaction_cancel%> within a " "%<__transaction_relaxed%>"); goto ret_error; } else if (is_outer) { if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0 && !is_tm_may_cancel_outer (current_function_decl)) { error_at (loc, "outer %<__transaction_cancel%> not " "within outer %<__transaction_atomic%>"); error_at (loc, " or a %<transaction_may_cancel_outer%> function"); goto ret_error; } } else if (parser->in_transaction == 0) { error_at (loc, "%<__transaction_cancel%> not within " "%<__transaction_atomic%>"); goto ret_error; } return add_stmt (build_tm_abort_call (loc, is_outer)); ret_error: return build1 (NOP_EXPR, void_type_node, error_mark_node); } /* Parse a single source file. */ void c_parse_file (void) { /* Use local storage to begin. If the first token is a pragma, parse it. If it is #pragma GCC pch_preprocess, then this will load a PCH file which will cause garbage collection. */ c_parser tparser; memset (&tparser, 0, sizeof tparser); tparser.tokens = &tparser.tokens_buf[0]; the_parser = &tparser; if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS) c_parser_pragma_pch_preprocess (&tparser); the_parser = ggc_alloc<c_parser> (); *the_parser = tparser; if (tparser.tokens == &tparser.tokens_buf[0]) the_parser->tokens = &the_parser->tokens_buf[0]; /* Initialize EH, if we've been told to do so. 
*/ if (flag_exceptions) using_eh_for_cleanups (); c_parser_translation_unit (the_parser); the_parser = NULL; } /* This function parses Cilk Plus array notation. The starting index is passed in INITIAL_INDEX and the array name is passes in ARRAY_VALUE. The return value of this function is a tree_node called VALUE_TREE of type ARRAY_NOTATION_REF. */ static tree c_parser_array_notation (location_t loc, c_parser *parser, tree initial_index, tree array_value) { c_token *token = NULL; tree start_index = NULL_TREE, end_index = NULL_TREE, stride = NULL_TREE; tree value_tree = NULL_TREE, type = NULL_TREE, array_type = NULL_TREE; tree array_type_domain = NULL_TREE; if (array_value == error_mark_node || initial_index == error_mark_node) { /* No need to continue. If either of these 2 were true, then an error must be emitted already. Thus, no need to emit them twice. */ c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return error_mark_node; } array_type = TREE_TYPE (array_value); gcc_assert (array_type); if (TREE_CODE (array_type) != ARRAY_TYPE && TREE_CODE (array_type) != POINTER_TYPE) { error_at (loc, "base of array section must be pointer or array type"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return error_mark_node; } type = TREE_TYPE (array_type); token = c_parser_peek_token (parser); if (token->type == CPP_EOF) { c_parser_error (parser, "expected %<:%> or numeral"); return value_tree; } else if (token->type == CPP_COLON) { if (!initial_index) { /* If we are here, then we have a case like this A[:]. 
*/ c_parser_consume_token (parser); if (TREE_CODE (array_type) == POINTER_TYPE) { error_at (loc, "start-index and length fields necessary for " "using array notations in pointers"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return error_mark_node; } if (TREE_CODE (array_type) == FUNCTION_TYPE) { error_at (loc, "array notations cannot be used with function " "type"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return error_mark_node; } array_type_domain = TYPE_DOMAIN (array_type); if (!array_type_domain) { error_at (loc, "start-index and length fields necessary for " "using array notations in dimensionless arrays"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return error_mark_node; } start_index = TYPE_MINVAL (array_type_domain); start_index = fold_build1 (CONVERT_EXPR, ptrdiff_type_node, start_index); if (!TYPE_MAXVAL (array_type_domain) || !TREE_CONSTANT (TYPE_MAXVAL (array_type_domain))) { error_at (loc, "start-index and length fields necessary for " "using array notations in variable-length arrays"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return error_mark_node; } end_index = TYPE_MAXVAL (array_type_domain); end_index = fold_build2 (PLUS_EXPR, TREE_TYPE (end_index), end_index, integer_one_node); end_index = fold_build1 (CONVERT_EXPR, ptrdiff_type_node, end_index); stride = build_int_cst (integer_type_node, 1); stride = fold_build1 (CONVERT_EXPR, ptrdiff_type_node, stride); } else if (initial_index != error_mark_node) { /* If we are here, then there should be 2 possibilities: 1. Array [EXPR : EXPR] 2. 
Array [EXPR : EXPR : EXPR] */ start_index = initial_index; if (TREE_CODE (array_type) == FUNCTION_TYPE) { error_at (loc, "array notations cannot be used with function " "type"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return error_mark_node; } c_parser_consume_token (parser); /* consume the ':' */ struct c_expr ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (loc, ce, false, false); end_index = ce.value; if (!end_index || end_index == error_mark_node) { c_parser_skip_to_end_of_block_or_statement (parser); return error_mark_node; } if (c_parser_peek_token (parser)->type == CPP_COLON) { c_parser_consume_token (parser); ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (loc, ce, false, false); stride = ce.value; if (!stride || stride == error_mark_node) { c_parser_skip_to_end_of_block_or_statement (parser); return error_mark_node; } } } else c_parser_error (parser, "expected array notation expression"); } else c_parser_error (parser, "expected array notation expression"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); value_tree = build_array_notation_ref (loc, array_value, start_index, end_index, stride, type); if (value_tree != error_mark_node) SET_EXPR_LOCATION (value_tree, loc); return value_tree; } #include "gt-c-c-parser.h"
/* ==== file: DRB029-truedep1-orig-yes.c ==== */
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* This program has data races due to true dependence within the loop
   at 63.  Data race pair: a[i+1]@64:5 vs. a[i]@64:12
   NOTE(review): the race is INTENTIONAL -- this is a DataRaceBench
   "yes" test case (a known loop-carried true dependence parallelized
   anyway); do not "fix" the dependence.  */
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[100];

  /* Sequential init: a[i] = i.  */
  for (i=0;i<len;i++)
    a[i]=i;

  /* Iteration i writes a[i+1], which iteration i+1 reads; executing
     the iterations concurrently (dynamic schedule) therefore races.  */
#pragma omp parallel for schedule(dynamic)
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+1;

  printf("a[50]=%d\n", a[50]);
  return 0;
}
/* ==== file: forces.c ==== */
/*
 * Compute forces and accumulate the virial and the potential.
 *
 * npart -- number of particles; x and f each hold npart*3 doubles
 *          (x,y,z components stored consecutively per particle).
 * x     -- particle positions (read only here).
 * f     -- force accumulators; this function only ADDS to f, so the
 *          caller presumably zeroes it beforehand -- TODO confirm.
 * side  -- box side length, used for minimum-image wraparound.
 * rcoff -- cutoff radius; pairs beyond it contribute nothing.
 *
 * Results accumulate into the globals epot (potential energy) and
 * vir (virial), both reset to 0 on entry.
 */
extern double epot, vir;

void forces(int npart, double x[], double f[], double side, double rcoff){
  int i;

  vir = 0.0;
  epot = 0.0;

  /* Each thread handles a static chunk of "i" particles; epot/vir are
     combined via OpenMP reduction, while f[] writes (which may target
     any particle) are protected with atomics below.  */
#pragma omp parallel for default(none) shared(f,x,npart,side,rcoff) reduction(+:epot,vir) schedule(static,32)
  for (i=0; i<npart*3; i+=3) {

    // zero force components on particle i
    double fxi = 0.0;
    double fyi = 0.0;
    double fzi = 0.0;
    int j;

    // loop over all particles with index > i (each pair visited once)
    for (j=i+3; j<npart*3; j+=3) {

      // compute distance between particles i and j allowing for wraparound
      double xx = x[i]-x[j];
      double yy = x[i+1]-x[j+1];
      double zz = x[i+2]-x[j+2];

      if (xx< (-0.5*side) ) xx += side;
      if (xx> (0.5*side) )  xx -= side;
      if (yy< (-0.5*side) ) yy += side;
      if (yy> (0.5*side) )  yy -= side;
      if (zz< (-0.5*side) ) zz += side;
      if (zz> (0.5*side) )  zz -= side;

      double rd = xx*xx+yy*yy+zz*zz;

      // if distance is inside cutoff radius compute forces
      // and contributions to pot. energy and virial
      if (rd<=rcoff*rcoff) {
        double rrd  = 1.0/rd;
        double rrd3 = rrd*rrd*rrd;
        double rrd4 = rrd3*rrd;
        double r148 = rrd4*(rrd3 - 0.5);

        epot += rrd3*(rrd3-1.0);
        vir  -= rd*r148;

        fxi += xx*r148;
        fyi += yy*r148;
        fzi += zz*r148;

        /* Particle j may belong to another thread's chunk, so its
           force components must be updated atomically.  */
#pragma omp atomic
        f[j]   -= xx*r148;
#pragma omp atomic
        f[j+1] -= yy*r148;
#pragma omp atomic
        f[j+2] -= zz*r148;
      }
    }

    // update forces on particle i
    /* f[i] can also be hit by other threads (when i is their "j"),
       hence atomic here as well.  */
#pragma omp atomic
    f[i]   += fxi;
#pragma omp atomic
    f[i+1] += fyi;
#pragma omp atomic
    f[i+2] += fzi;
  }
}
/* ==== file: heat_3d-p.pluto.c ==== */
/*
 * heat_3d-p.pluto.c — Pluto/CLooG-generated OpenMP version of a discretized
 * 3D heat equation stencil with non periodic boundary conditions.
 * Adapted from Pochoir test bench.
 */
#include <omp.h>
#include <math.h>
/* Integer ceiling/floor-division and min/max helpers emitted by Pluto. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>

/*
 * N is the number of points
 * T is the number of timesteps
 */
#ifdef HAS_DECLS
#include "decls.h"
#else
#define N 800L
#define T 1L
#endif

/* Nominal floating-point operations per stencil point (MFLOPS report only). */
#define NUM_FP_OPS 15

/* Define our arrays */
//double A[2][N][N][N];
double total=0;        /* grid checksum, used under -DVERIFY */
double sum_err_sqr=0;  /* squared-deviation accumulator, used under -DVERIFY */
int chtotal=0;         /* byte-wise checksum, used under -DVERIFY */

/*
 * Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: mutates *y while performing the microsecond carry.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait. tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char * argv[])
{
  long int t, i, j, k;
  const int BASE = 1024;
  long count=0;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0;

  /* Two time planes of an N^3 grid, allocated as a pointer tree with the
     outer N == 800L definition in effect. */
  double ****A = (double ****)malloc(2 * sizeof (double ***));
  int l;
  for (l = 0; l < 2; l++){
    A[l] = (double ***) malloc(N * sizeof(double **));
    for (i = 0; i < N; i++){
      A[l][i] = (double **) malloc(N * sizeof(double *));
      for (j = 0; j < N; j++)
        A[l][i][j] = (double *) malloc(N * sizeof (double));
    }
  }

  printf("Number of points = %ld\t|Number of timesteps = %ld\t", N, T);

  /* Initialization */
  srand(42); // seed with a constant value to verify results
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      for (k = 0; k < N; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }

#ifdef TIME
  gettimeofday(&start, 0);
#endif

  /* NOTE(review): N is redefined to 400L for the generated compute section
     below, while the arrays above were allocated with N == 800L.  The stencil
     indexes up to 2*N-1 == 799, i.e. the full allocated extent — confirm this
     pairing against the generator that produced this file. */
#undef N
#define N 400L
#undef T
#define T 1L

  /* (glibc header boilerplate carried along verbatim by the code generator:
     "Copyright (C) 1991-2012 Free Software Foundation, Inc. ... This file is
     part of the GNU C Library ... LGPL 2.1 or later ... We do support the
     IEC 559 math functionality ... We do not support C11 <threads.h>.") */

  /* Loop counters and tile bounds emitted by Pluto.  t13 appears in the
     private() clauses below but is never used. */
  int t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
  int lb, ub, lbp, ubp, lb2, ub2;
  register int lbv, ubv;

/* Start of CLooG code */
  /* Nine parallel sweeps per timestep; each sweep tiles the (t14, t15) plane
     into 32x128 blocks and parallelizes over t10.  The sweeps alternate
     between writing plane A[1] (reading A[0]) and writing plane A[0]
     (reading A[1]), covering the grid octant-by-octant via the 2*N-1-(.)
     index reflections.
     NOTE(review): within a single "parallel for" over t10, statements read
     A rows at t10+1 / 2*N-1-(t10)+1 that other t10 iterations write in the
     same sweep — this looks like an intentional data-race benchmark variant;
     confirm against the originating suite before treating results as
     deterministic. */
  if ((N >= 1) && (T >= 1)) {
    for (t1=0;t1<=T-1;t1++) {
      /* Sweep 1 of 9: writes five reflected octants of A[1] from A[0]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[1][2*N-1-(t10)][2*N-1-(t14)][t15] =
                  0.125 * ( ((t10==0)? A[0][(0)][2*N-1-(t14)][t15]: A[0][(2*N-1-(t10)+1)][2*N-1-(t14)][t15]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][t15] + A[0][2*N-1-(t10)-1][2*N-1-(t14)][t15])
                + 0.125 * ( ((t14==0)? A[0][2*N-1-(t10)][(0)][t15]: A[0][2*N-1-(t10)][(2*N-1-(t14)+1)][t15]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][t15] + A[0][2*N-1-(t10)][2*N-1-(t14)-1][t15])
                + 0.125 * ( ((t15==0)? A[0][2*N-1-(t10)][2*N-1-(t14)][(2*N-1)]: A[0][2*N-1-(t10)][2*N-1-(t14)][(t15-1)]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][t15] + A[0][2*N-1-(t10)][2*N-1-(t14)][t15+1])
                + A[0][2*N-1-(t10)][2*N-1-(t14)][t15];;
                A[1][t10][t14][2*N-1-(t15)] =
                  0.125 * ( A[0][t10+1][t14][2*N-1-(t15)] - 2.0 * A[0][t10][t14][2*N-1-(t15)] + ((t10==0)? A[0][(2*N-1)][t14][2*N-1-(t15)]: A[0][(t10-1)][t14][2*N-1-(t15)]))
                + 0.125 * ( A[0][t10][t14+1][2*N-1-(t15)] - 2.0 * A[0][t10][t14][2*N-1-(t15)] + ((t14==0)? A[0][t10][(2*N-1)][2*N-1-(t15)]: A[0][t10][(t14-1)][2*N-1-(t15)]))
                + 0.125 * ( A[0][t10][t14][2*N-1-(t15)-1] - 2.0 * A[0][t10][t14][2*N-1-(t15)] + ((t15==0)? A[0][t10][t14][(0)]: A[0][t10][t14][(2*N-1-(t15)+1)]))
                + A[0][t10][t14][2*N-1-(t15)];;
                A[1][2*N-1-(t10)][t14][2*N-1-(t15)] =
                  0.125 * ( ((t10==0)? A[0][(0)][t14][2*N-1-(t15)]: A[0][(2*N-1-(t10)+1)][t14][2*N-1-(t15)]) - 2.0 * A[0][2*N-1-(t10)][t14][2*N-1-(t15)] + A[0][2*N-1-(t10)-1][t14][2*N-1-(t15)])
                + 0.125 * ( A[0][2*N-1-(t10)][t14+1][2*N-1-(t15)] - 2.0 * A[0][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t14==0)? A[0][2*N-1-(t10)][(2*N-1)][2*N-1-(t15)]: A[0][2*N-1-(t10)][(t14-1)][2*N-1-(t15)]))
                + 0.125 * ( A[0][2*N-1-(t10)][t14][2*N-1-(t15)-1] - 2.0 * A[0][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t15==0)? A[0][2*N-1-(t10)][t14][(0)]: A[0][2*N-1-(t10)][t14][(2*N-1-(t15)+1)]))
                + A[0][2*N-1-(t10)][t14][2*N-1-(t15)];;
                A[1][t10][2*N-1-(t14)][2*N-1-(t15)] =
                  0.125 * ( A[0][t10+1][2*N-1-(t14)][2*N-1-(t15)] - 2.0 * A[0][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t10==0)? A[0][(2*N-1)][2*N-1-(t14)][2*N-1-(t15)]: A[0][(t10-1)][2*N-1-(t14)][2*N-1-(t15)]))
                + 0.125 * ( ((t14==0)? A[0][t10][(0)][2*N-1-(t15)]: A[0][t10][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[0][t10][2*N-1-(t14)][2*N-1-(t15)] + A[0][t10][2*N-1-(t14)-1][2*N-1-(t15)])
                + 0.125 * ( A[0][t10][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[0][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[0][t10][2*N-1-(t14)][(0)]: A[0][t10][2*N-1-(t14)][(2*N-1-(t15)+1)]))
                + A[0][t10][2*N-1-(t14)][2*N-1-(t15)];;
                A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] =
                  0.125 * ( ((t10==0)? A[0][(0)][2*N-1-(t14)][2*N-1-(t15)]: A[0][(2*N-1-(t10)+1)][2*N-1-(t14)][2*N-1-(t15)]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[0][2*N-1-(t10)-1][2*N-1-(t14)][2*N-1-(t15)])
                + 0.125 * ( ((t14==0)? A[0][2*N-1-(t10)][(0)][2*N-1-(t15)]: A[0][2*N-1-(t10)][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[0][2*N-1-(t10)][2*N-1-(t14)-1][2*N-1-(t15)])
                + 0.125 * ( A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[0][2*N-1-(t10)][2*N-1-(t14)][(0)]: A[0][2*N-1-(t10)][2*N-1-(t14)][(2*N-1-(t15)+1)]))
                + A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)];;
              }
            }
          }
        }
      }
      /* Sweep 2 of 9: writes A[1][t10][2N-1-t14][t15] from A[0] and
         A[0][2N-1-t10][2N-1-t14][2N-1-t15] back from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[1][t10][2*N-1-(t14)][t15] =
                  0.125 * ( A[0][t10+1][2*N-1-(t14)][t15] - 2.0 * A[0][t10][2*N-1-(t14)][t15] + ((t10==0)? A[0][(2*N-1)][2*N-1-(t14)][t15]: A[0][(t10-1)][2*N-1-(t14)][t15]))
                + 0.125 * ( ((t14==0)? A[0][t10][(0)][t15]: A[0][t10][(2*N-1-(t14)+1)][t15]) - 2.0 * A[0][t10][2*N-1-(t14)][t15] + A[0][t10][2*N-1-(t14)-1][t15])
                + 0.125 * ( ((t15==0)? A[0][t10][2*N-1-(t14)][(2*N-1)]: A[0][t10][2*N-1-(t14)][(t15-1)]) - 2.0 * A[0][t10][2*N-1-(t14)][t15] + A[0][t10][2*N-1-(t14)][t15+1])
                + A[0][t10][2*N-1-(t14)][t15];;
                A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] =
                  0.125 * ( ((t10==0)? A[1][(0)][2*N-1-(t14)][2*N-1-(t15)]: A[1][(2*N-1-(t10)+1)][2*N-1-(t14)][2*N-1-(t15)]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[1][2*N-1-(t10)-1][2*N-1-(t14)][2*N-1-(t15)])
                + 0.125 * ( ((t14==0)? A[1][2*N-1-(t10)][(0)][2*N-1-(t15)]: A[1][2*N-1-(t10)][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[1][2*N-1-(t10)][2*N-1-(t14)-1][2*N-1-(t15)])
                + 0.125 * ( A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[1][2*N-1-(t10)][2*N-1-(t14)][(0)]: A[1][2*N-1-(t10)][2*N-1-(t14)][(2*N-1-(t15)+1)]))
                + A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)];;
              }
            }
          }
        }
      }
      /* Sweep 3 of 9: writes A[1][2N-1-t10][t14][t15] from A[0] and
         A[0][t10][2N-1-t14][2N-1-t15] back from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[1][2*N-1-(t10)][t14][t15] =
                  0.125 * ( ((t10==0)? A[0][(0)][t14][t15]: A[0][(2*N-1-(t10)+1)][t14][t15]) - 2.0 * A[0][2*N-1-(t10)][t14][t15] + A[0][2*N-1-(t10)-1][t14][t15])
                + 0.125 * ( A[0][2*N-1-(t10)][t14+1][t15] - 2.0 * A[0][2*N-1-(t10)][t14][t15] + ((t14==0)? A[0][2*N-1-(t10)][(2*N-1)][t15]: A[0][2*N-1-(t10)][(t14-1)][t15]))
                + 0.125 * ( ((t15==0)? A[0][2*N-1-(t10)][t14][(2*N-1)]: A[0][2*N-1-(t10)][t14][(t15-1)]) - 2.0 * A[0][2*N-1-(t10)][t14][t15] + A[0][2*N-1-(t10)][t14][t15+1])
                + A[0][2*N-1-(t10)][t14][t15];;
                A[0][t10][2*N-1-(t14)][2*N-1-(t15)] =
                  0.125 * ( A[1][t10+1][2*N-1-(t14)][2*N-1-(t15)] - 2.0 * A[1][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t10==0)? A[1][(2*N-1)][2*N-1-(t14)][2*N-1-(t15)]: A[1][(t10-1)][2*N-1-(t14)][2*N-1-(t15)]))
                + 0.125 * ( ((t14==0)? A[1][t10][(0)][2*N-1-(t15)]: A[1][t10][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[1][t10][2*N-1-(t14)][2*N-1-(t15)] + A[1][t10][2*N-1-(t14)-1][2*N-1-(t15)])
                + 0.125 * ( A[1][t10][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[1][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[1][t10][2*N-1-(t14)][(0)]: A[1][t10][2*N-1-(t14)][(2*N-1-(t15)+1)]))
                + A[1][t10][2*N-1-(t14)][2*N-1-(t15)];;
              }
            }
          }
        }
      }
      /* Sweep 4 of 9: writes A[0][2N-1-t10][t14][2N-1-t15] from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[0][2*N-1-(t10)][t14][2*N-1-(t15)] =
                  0.125 * ( ((t10==0)? A[1][(0)][t14][2*N-1-(t15)]: A[1][(2*N-1-(t10)+1)][t14][2*N-1-(t15)]) - 2.0 * A[1][2*N-1-(t10)][t14][2*N-1-(t15)] + A[1][2*N-1-(t10)-1][t14][2*N-1-(t15)])
                + 0.125 * ( A[1][2*N-1-(t10)][t14+1][2*N-1-(t15)] - 2.0 * A[1][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t14==0)? A[1][2*N-1-(t10)][(2*N-1)][2*N-1-(t15)]: A[1][2*N-1-(t10)][(t14-1)][2*N-1-(t15)]))
                + 0.125 * ( A[1][2*N-1-(t10)][t14][2*N-1-(t15)-1] - 2.0 * A[1][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t15==0)? A[1][2*N-1-(t10)][t14][(0)]: A[1][2*N-1-(t10)][t14][(2*N-1-(t15)+1)]))
                + A[1][2*N-1-(t10)][t14][2*N-1-(t15)];;
              }
            }
          }
        }
      }
      /* Sweep 5 of 9: writes A[1][t10][t14][t15] from A[0] and
         A[0][2N-1-t10][2N-1-t14][t15] back from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[1][t10][t14][t15] =
                  0.125 * ( A[0][t10+1][t14][t15] - 2.0 * A[0][t10][t14][t15] + ((t10==0)? A[0][(2*N-1)][t14][t15]: A[0][(t10-1)][t14][t15]))
                + 0.125 * ( A[0][t10][t14+1][t15] - 2.0 * A[0][t10][t14][t15] + ((t14==0)? A[0][t10][(2*N-1)][t15]: A[0][t10][(t14-1)][t15]))
                + 0.125 * ( ((t15==0)? A[0][t10][t14][(2*N-1)]: A[0][t10][t14][(t15-1)]) - 2.0 * A[0][t10][t14][t15] + A[0][t10][t14][t15+1])
                + A[0][t10][t14][t15];;
                A[0][2*N-1-(t10)][2*N-1-(t14)][t15] =
                  0.125 * ( ((t10==0)? A[1][(0)][2*N-1-(t14)][t15]: A[1][(2*N-1-(t10)+1)][2*N-1-(t14)][t15]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][t15] + A[1][2*N-1-(t10)-1][2*N-1-(t14)][t15])
                + 0.125 * ( ((t14==0)? A[1][2*N-1-(t10)][(0)][t15]: A[1][2*N-1-(t10)][(2*N-1-(t14)+1)][t15]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][t15] + A[1][2*N-1-(t10)][2*N-1-(t14)-1][t15])
                + 0.125 * ( ((t15==0)? A[1][2*N-1-(t10)][2*N-1-(t14)][(2*N-1)]: A[1][2*N-1-(t10)][2*N-1-(t14)][(t15-1)]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][t15] + A[1][2*N-1-(t10)][2*N-1-(t14)][t15+1])
                + A[1][2*N-1-(t10)][2*N-1-(t14)][t15];;
              }
            }
          }
        }
      }
      /* Sweep 6 of 9: writes A[0][t10][t14][2N-1-t15] from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[0][t10][t14][2*N-1-(t15)] =
                  0.125 * ( A[1][t10+1][t14][2*N-1-(t15)] - 2.0 * A[1][t10][t14][2*N-1-(t15)] + ((t10==0)? A[1][(2*N-1)][t14][2*N-1-(t15)]: A[1][(t10-1)][t14][2*N-1-(t15)]))
                + 0.125 * ( A[1][t10][t14+1][2*N-1-(t15)] - 2.0 * A[1][t10][t14][2*N-1-(t15)] + ((t14==0)? A[1][t10][(2*N-1)][2*N-1-(t15)]: A[1][t10][(t14-1)][2*N-1-(t15)]))
                + 0.125 * ( A[1][t10][t14][2*N-1-(t15)-1] - 2.0 * A[1][t10][t14][2*N-1-(t15)] + ((t15==0)? A[1][t10][t14][(0)]: A[1][t10][t14][(2*N-1-(t15)+1)]))
                + A[1][t10][t14][2*N-1-(t15)];;
              }
            }
          }
        }
      }
      /* Sweep 7 of 9: writes A[0][t10][2N-1-t14][t15] from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[0][t10][2*N-1-(t14)][t15] =
                  0.125 * ( A[1][t10+1][2*N-1-(t14)][t15] - 2.0 * A[1][t10][2*N-1-(t14)][t15] + ((t10==0)? A[1][(2*N-1)][2*N-1-(t14)][t15]: A[1][(t10-1)][2*N-1-(t14)][t15]))
                + 0.125 * ( ((t14==0)? A[1][t10][(0)][t15]: A[1][t10][(2*N-1-(t14)+1)][t15]) - 2.0 * A[1][t10][2*N-1-(t14)][t15] + A[1][t10][2*N-1-(t14)-1][t15])
                + 0.125 * ( ((t15==0)? A[1][t10][2*N-1-(t14)][(2*N-1)]: A[1][t10][2*N-1-(t14)][(t15-1)]) - 2.0 * A[1][t10][2*N-1-(t14)][t15] + A[1][t10][2*N-1-(t14)][t15+1])
                + A[1][t10][2*N-1-(t14)][t15];;
              }
            }
          }
        }
      }
      /* Sweep 8 of 9: writes A[0][2N-1-t10][t14][t15] from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[0][2*N-1-(t10)][t14][t15] =
                  0.125 * ( ((t10==0)? A[1][(0)][t14][t15]: A[1][(2*N-1-(t10)+1)][t14][t15]) - 2.0 * A[1][2*N-1-(t10)][t14][t15] + A[1][2*N-1-(t10)-1][t14][t15])
                + 0.125 * ( A[1][2*N-1-(t10)][t14+1][t15] - 2.0 * A[1][2*N-1-(t10)][t14][t15] + ((t14==0)? A[1][2*N-1-(t10)][(2*N-1)][t15]: A[1][2*N-1-(t10)][(t14-1)][t15]))
                + 0.125 * ( ((t15==0)? A[1][2*N-1-(t10)][t14][(2*N-1)]: A[1][2*N-1-(t10)][t14][(t15-1)]) - 2.0 * A[1][2*N-1-(t10)][t14][t15] + A[1][2*N-1-(t10)][t14][t15+1])
                + A[1][2*N-1-(t10)][t14][t15];;
              }
            }
          }
        }
      }
      /* Sweep 9 of 9: writes A[0][t10][t14][t15] from A[1]. */
      lbp=0;
      ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
      for (t10=lbp;t10<=ubp;t10++) {
        for (t11=0;t11<=floord(N-1,32);t11++) {
          for (t12=0;t12<=floord(N-1,128);t12++) {
            for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
              lbv=128*t12;
              ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
              for (t15=lbv;t15<=ubv;t15++) {
                A[0][t10][t14][t15] =
                  0.125 * ( A[1][t10+1][t14][t15] - 2.0 * A[1][t10][t14][t15] + ((t10==0)? A[1][(2*N-1)][t14][t15]: A[1][(t10-1)][t14][t15]))
                + 0.125 * ( A[1][t10][t14+1][t15] - 2.0 * A[1][t10][t14][t15] + ((t14==0)? A[1][t10][(2*N-1)][t15]: A[1][t10][(t14-1)][t15]))
                + 0.125 * ( ((t15==0)? A[1][t10][t14][(2*N-1)]: A[1][t10][t14][(t15-1)]) - 2.0 * A[1][t10][t14][t15] + A[1][t10][t14][t15+1])
                + A[1][t10][t14][t15];;
              }
            }
          }
        }
      }
    }
  }
/* End of CLooG code */

  /* Restore the outer N/T definitions for reporting, verification, freeing. */
#undef N
#define N 800L
#undef T
#define T 1L

#ifdef TIME
  gettimeofday(&end, 0);
  ts_return = timeval_subtract(&result, &end, &start);
  tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
  printf("|Time taken: %7.5lfms\t", tdiff * 1.0e3);
  printf("|MFLOPS: %f\n", ((((double)NUM_FP_OPS * N *N * N * (T-1)) / tdiff) / 1000000L));
#endif

#ifdef VERIFY
  /* Grid sum over the final time plane. */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      for (k = 0; k < N; k++) {
        total+= A[T%2][i][j][k] ;
      }
    }
  }
  printf("|sum: %e\t", total);
  /* Sum of squared deviations from total/N. */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      for (k = 0; k < N; k++) {
        sum_err_sqr += (A[T%2][i][j][k] - (total/N))*(A[T%2][i][j][k] - (total/N));
      }
    }
  }
  printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr));
  /* Byte-wise checksum of the raw representation. */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      for (k = 0; k < N; k++) {
        chtotal += ((char *)A[T%2][i][j])[k];
      }
    }
  }
  printf("|sum(rep(A)) = %d\n", chtotal);
#endif

  /* Release the pointer tree in reverse order of allocation. */
  for (l = 0; l < 2; l++){
    for (i = 0; i < N; i++){
      for (j = 0; j < N; j++)
        free(A[l][i][j]); // = (double *) malloc(N * sizeof (double));
      free(A[l][i]); // = (double **) malloc(N * sizeof(double *));
    }
    free(A[l]); // = (double ***) malloc(N * sizeof(double **));
  }

  return 0;
}

// icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm
// /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/
// /* @ begin PrimeRegTile (scalar_replacement=0; T1t5=4; T1t6=4; T1t7=4; T1t8=4; ) @*/
// /* @ end @*/
SectionsSectionBodyLink.c
/*
 * NOTE(review): appears to be a compiler/translator test case (the filename
 * marker reads "SectionsSectionBodyLink") exercising the OpenMP `sections`
 * construct with various body shapes.  The value-less expression statements
 * (100;, 101;, ...) and the unused `int x;` declarations are deliberate
 * fixtures — do not "clean them up".
 */
int main()
{
  /* Case 1: sections construct with an empty body. */
#pragma omp sections
  {
  }

  /* Case 2: one explicit section containing only a declaration. */
#pragma omp sections
  {
#pragma omp section
    {
      int x;
    }
  }

  /* Case 3: two sections, each a bare expression statement. */
#pragma omp sections
  {
#pragma omp section
    {
      100;
    }
#pragma omp section
    {
      101;
    }
  }

  /* Case 4: a single section with an expression statement. */
#pragma omp sections
  {
#pragma omp section
    {
      103;
    }
  }

  /* Case 5: three sections mixing declarations and expressions; the two
     `x` variables live in distinct section scopes. */
#pragma omp sections
  {
#pragma omp section
    {
      int x;
    }
#pragma omp section
    {
      105;
    }
#pragma omp section
    {
      int x;
    }
  }

  /* Case 6: two sections, each declaring its own local `x`. */
#pragma omp sections
  {
#pragma omp section
    {
      int x;
    }
#pragma omp section
    {
      int x;
    }
  }
}
equation_groupnorm.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #include <omp.h> #define ALIGNDOWN(N, A) ((N) & ~((A)-1)) #define USE_VECTORIZED_PATH 1 float upconvert_bf16(libxsmm_bfloat16 x) { union libxsmm_bfloat16_hp bf16_hp; bf16_hp.i[1] = x; bf16_hp.i[0] = 0; return bf16_hp.f; } void tpp_groupnorm_fwd_fp32(long NP, long CP, long HW, long CB, long G, long num_HW_blocks, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel, libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel, libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, float eps) { LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [NP, CP, HW, CB] */ LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB); LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB); /* [CP,CB] */ LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB); /* [CP,CB] */ int np, group_size; group_size = (CP*CB)/G; if (group_size <= CB){ int cp; #pragma omp parallel for collapse(2) for(np = 0; np < NP; np++){ for (cp = 0; cp < CP; cp++){ LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CB], 64); LIBXSMM_ALIGNED(float b[CB], 64); int i, j, hwb, g; 
libxsmm_matrix_eqn_param eqn_param; libxsmm_meltw_unary_param m_reduce_groups_params, v_reduce_groups_params, reduce_HW_params; libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_matrix_arg arg_array[5]; all_zero_param.out.primary = tmp; all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &tmp[CB]; all_zero_kernel(&all_zero_param); all_zero_param.out.primary = sum_X; all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; all_zero_G_kernel(&all_zero_param); /*************************** Process entire block code *****************************/ LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); reduce_HW_params.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW_block, CB] -----> [2 * CB] */ reduce_HW_kernel(&reduce_HW_params); add_param.in0.primary = tmp; add_param.in1.primary = new_tmp; add_param.out.primary = tmp; add_kernel(&add_param); add_param.in0.primary = &tmp[CB]; add_param.in1.primary = &new_tmp[CB]; add_param.out.primary = &tmp[CB]; add_kernel(&add_param); /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] += new_tmp[cb]; */ /* } */ } for(i=0; i < CB; i += group_size){ g = (cp*CB + i)/group_size; /* determine current group */ m_reduce_groups_params.in.primary = &tmp[i]; m_reduce_groups_params.out.primary = &sum_X[g]; v_reduce_groups_params.in.primary = &tmp[CB + i]; v_reduce_groups_params.out.primary = &sum_X2[g]; reduce_groups_kernel(&m_reduce_groups_params); reduce_groups_kernel(&v_reduce_groups_params); mean[np*G + g] = sum_X[g] / ((float)group_size * HW); var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */ for(j = 0; j < group_size; j++){ s[i + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */ b[i + j] = -1 * mean[np*G + g] * s[i + j]; /* -E[X]/sqrt(var(X) + eps) */ } } 
arg_array[1].primary = s; /* [CB] */ arg_array[2].primary = b; /* [CB] */ arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */ arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */ } } } } else{ /* Case when group_size > CB */ #pragma omp parallel for for(np = 0; np < NP; np++){ LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CP*CB], 64); LIBXSMM_ALIGNED(float b[CP*CB], 64); int i, j, cp, hwb, g; float m, v; libxsmm_matrix_eqn_param eqn_param; libxsmm_meltw_unary_param m_reduce_rows_params, v_reduce_rows_params, m_reduce_groups_params, v_reduce_groups_params, reduce_HW_params; libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_matrix_arg arg_array[5]; all_zero_param.out.primary = sum_X; all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; all_zero_G_kernel(&all_zero_param); LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); for (cp = 0; cp < CP; cp++){ /* [cp, HW, CB] */ all_zero_param.out.primary = tmp; all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &tmp[CB]; all_zero_kernel(&all_zero_param); /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] = 0.0f; */ /* } */ reduce_HW_params.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] -----> [2 * CB] */ reduce_HW_kernel(&reduce_HW_params); add_param.in0.primary = tmp; add_param.in1.primary = new_tmp; 
add_param.out.primary = tmp; add_kernel(&add_param); add_param.in0.primary = &tmp[CB]; add_param.in1.primary = &new_tmp[CB]; add_param.out.primary = &tmp[CB]; add_kernel(&add_param); /* #pragma omp simd */ /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] += new_tmp[cb]; */ /* } */ } if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */ g = (cp*CB)/group_size; /* determine current group */ m_reduce_rows_params.in.primary = tmp; m_reduce_rows_params.out.primary = &m; v_reduce_rows_params.in.primary = &tmp[CB]; v_reduce_rows_params.out.primary = &v; reduce_rows_kernel(&m_reduce_rows_params); reduce_rows_kernel(&v_reduce_rows_params); sum_X[g] += m; sum_X2[g] += v; } else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */ for(i=0; i < CB; i += group_size){ m_reduce_groups_params.in.primary = &tmp[i]; m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)]; v_reduce_groups_params.in.primary = &tmp[CB + i]; v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)]; reduce_groups_kernel(&m_reduce_groups_params); reduce_groups_kernel(&v_reduce_groups_params); } } } for(g = 0; g < G; g++){ /* mean and variance calculation */ mean[np*G + g] = sum_X[g] / ((float)group_size * HW); var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */ for(j = 0; j < group_size; j++){ s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */ b[g*group_size + j] = -1 * mean[np*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */ } } for (cp = 0; cp < CP; cp++){ arg_array[1].primary = &s[cp*CB]; /* [CB] */ arg_array[2].primary = &b[cp*CB]; /* [CB] */ arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */ arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = 
&LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */ func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */ } } } } } void tpp_groupnorm_fwd_bf16(long NP, long CP, long HW, long CB, long G, long num_HW_blocks, libxsmm_bfloat16 *pinp, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pbeta, float *mean, float *var, libxsmm_bfloat16 *pout, libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel, libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel, libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, float eps) { LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, inp, pinp, CP, HW, CB); /* [NP, CP, HW, CB] */ LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, out, pout, CP, HW, CB); LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB); LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, beta, pbeta, CB); int np, group_size; group_size = (CP*CB)/G; if (group_size <= CB){ int cp; #pragma omp parallel for collapse(2) for(np = 0; np < NP; np++){ for (cp = 0; cp < CP; cp++){ LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CB], 64); LIBXSMM_ALIGNED(float b[CB], 64); int i, j, hwb, g; libxsmm_matrix_eqn_param eqn_param; libxsmm_meltw_unary_param m_reduce_groups_params, v_reduce_groups_params, reduce_HW_params; libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_matrix_arg arg_array[5]; all_zero_param.out.primary = tmp; all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &tmp[CB]; all_zero_kernel(&all_zero_param); all_zero_param.out.primary = sum_X; all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; 
all_zero_G_kernel(&all_zero_param); /*************************** Process entire block code *****************************/ LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); reduce_HW_params.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW_block, CB] -----> [2 * CB] */ reduce_HW_kernel(&reduce_HW_params); add_param.in0.primary = tmp; add_param.in1.primary = new_tmp; add_param.out.primary = tmp; add_kernel(&add_param); add_param.in0.primary = &tmp[CB]; add_param.in1.primary = &new_tmp[CB]; add_param.out.primary = &tmp[CB]; add_kernel(&add_param); /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] += new_tmp[cb]; */ /* } */ } for(i=0; i < CB; i += group_size){ g = (cp*CB + i)/group_size; /* determine current group */ m_reduce_groups_params.in.primary = &tmp[i]; m_reduce_groups_params.out.primary = &sum_X[g]; v_reduce_groups_params.in.primary = &tmp[CB + i]; v_reduce_groups_params.out.primary = &sum_X2[g]; reduce_groups_kernel(&m_reduce_groups_params); reduce_groups_kernel(&v_reduce_groups_params); mean[np*G + g] = sum_X[g] / ((float)group_size * HW); var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */ for(j = 0; j < group_size; j++){ s[i + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */ b[i + j] = -1 * mean[np*G + g] * s[i + j]; /* -E[X]/sqrt(var(X) + eps) */ } } arg_array[1].primary = s; /* [CB] */ arg_array[2].primary = b; /* [CB] */ arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */ arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ eqn_param.inputs = arg_array; eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, 
hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */ func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */ } } } } else{ #pragma omp parallel for for(np = 0; np < NP; np++){ LIBXSMM_ALIGNED(float tmp[2*CB], 64); LIBXSMM_ALIGNED(float sum_X[G], 64); LIBXSMM_ALIGNED(float sum_X2[G], 64); LIBXSMM_ALIGNED(float s[CP*CB], 64); LIBXSMM_ALIGNED(float b[CP*CB], 64); int i, j, cp, g, hwb; float m, v; libxsmm_matrix_eqn_param eqn_param; libxsmm_meltw_unary_param m_reduce_rows_params, m_reduce_groups_params, v_reduce_rows_params, v_reduce_groups_params, reduce_HW_params; libxsmm_meltw_unary_param all_zero_param; libxsmm_meltw_binary_param add_param; libxsmm_matrix_arg arg_array[5]; all_zero_param.out.primary = sum_X; all_zero_G_kernel(&all_zero_param); all_zero_param.out.primary = sum_X2; all_zero_G_kernel(&all_zero_param); LIBXSMM_ALIGNED(float new_tmp[2*CB], 64); for (cp = 0; cp < CP; cp++){ /* [cp, HW, CB] */ all_zero_param.out.primary = tmp; all_zero_kernel(&all_zero_param); all_zero_param.out.primary = &tmp[CB]; all_zero_kernel(&all_zero_param); /* #pragma omp simd */ /* for (cb = 0; cb < 2*CB; cb++) { */ /* tmp[cb] = 0.0f; */ /* } */ reduce_HW_params.out.primary = new_tmp; /* [2*CB] */ for(hwb=0; hwb < num_HW_blocks; hwb++){ reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] -----> [2 * CB] */ reduce_HW_kernel(&reduce_HW_params); add_param.in0.primary = tmp; add_param.in1.primary = new_tmp; add_param.out.primary = tmp; add_kernel(&add_param); add_param.in0.primary = &tmp[CB]; add_param.in1.primary = &new_tmp[CB]; add_param.out.primary = &tmp[CB]; add_kernel(&add_param); /* #pragma omp simd for (cb = 0; cb < 2*CB; cb++) { tmp[cb] += new_tmp[cb]; } */ } if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */ g = (cp*CB)/group_size; /* determine current group */ m_reduce_rows_params.in.primary = tmp; m_reduce_rows_params.out.primary = 
&m;
          v_reduce_rows_params.in.primary = &tmp[CB];
          v_reduce_rows_params.out.primary = &v;
          reduce_rows_kernel(&m_reduce_rows_params);
          reduce_rows_kernel(&v_reduce_rows_params);
          sum_X[g] += m;   /* fold this channel block's sums into its group accumulator */
          sum_X2[g] += v;
        }
        else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
          /* Several groups live inside one channel block: reduce each group_size-wide
             sub-range of tmp (sums in [0,CB), sums of squares in [CB,2*CB)) into the
             accumulator slot of its own group. */
          for(i=0; i < CB; i += group_size){
            m_reduce_groups_params.in.primary = &tmp[i];
            m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)];
            v_reduce_groups_params.in.primary = &tmp[CB + i];
            v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)];
            reduce_groups_kernel(&m_reduce_groups_params);
            reduce_groups_kernel(&v_reduce_groups_params);
          }
        }
      }
      /* Turn the per-group sums into mean/variance, then derive the per-channel
         scale s and shift b used by the normalization equation below. */
      for(g = 0; g < G; g++){ /* mean and variance calculation */
        mean[np*G + g] = sum_X[g] / ((float)group_size * HW);
        var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */
        for(j = 0; j < group_size; j++){
          s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
          b[g*group_size + j] = -1 * mean[np*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */
        }
      }
      /* Apply y = ((s*x + b)*gamma + beta) one [HW/num_HW_blocks, CB] tile at a time
         through the JIT-ed equation func10; arg slot numbers match the equation
         arguments pushed in main. */
      for (cp = 0; cp < CP; cp++){
        arg_array[1].primary = &s[cp*CB];                                /* [CB] */
        arg_array[2].primary = &b[cp*CB];                                /* [CB] */
        arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
        arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB);  /* [CB] */
        for(hwb=0; hwb < num_HW_blocks; hwb++){
          arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
          eqn_param.inputs = arg_array;
          eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */
          func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
        }
      }
    }
  }
}

/*
 * Backward pass of group normalization (FP32) over a blocked [NP, CP, HW, CB]
 * tensor, driven by JIT-ed LIBXSMM TPP kernels.
 *
 * Inputs:  pdout (output gradient), pinp (forward input), mean/var (saved
 *          per-group statistics, [NP, G]), pgamma (scale parameter).
 * Outputs: pdin (input gradient), pdgamma/pdbeta (parameter gradients,
 *          accumulated with += over the NP dimension — not zeroed here).
 * Kernels: dgamma_func/dbeta_func/db_func/ds_func/din_func are matrix-equation
 *          kernels built in main; all_zero_kernel clears CB-sized scratch.
 *          NOTE(review): add_kernel is unused in this function; it appears to be
 *          kept only for a signature uniform with the forward pass — confirm.
 *
 * The two branches below handle the two possible layouts of groups relative to
 * the channel-block size CB; within each, per-channel coefficients a, b, c of
 * the analytic group-norm backward formula are computed from the group
 * statistics and then applied tile by tile.
 */
void tpp_groupnorm_bwd_fp32(long NP, long CP, long HW, long CB, long G, long num_HW_blocks,
                            float *pdout, float *pinp, float *mean, float *var, float *pgamma,
                            float *pdin, float *pdgamma, float *pdbeta,
                            libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func,
                            libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func,
                            libxsmm_matrix_eqn_function din_func, libxsmm_meltwfunction_unary all_zero_kernel,
                            libxsmm_meltwfunction_binary add_kernel, float eps) {

  int group_size;
  group_size = (CP*CB)/G;                                /* channels per group */
  const float scale = 1.0f / ((float)group_size * HW);   /* 1/N for one group's reduction */

  LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);

  /* Per-image partial parameter gradients; reduced over NP at the end to avoid
     atomics on the shared dgamma/dbeta outputs. */
  LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
  LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);

  if (group_size <= CB){
    /* One or more whole groups fit inside a single channel block, so each
       (np, cp) pair can be processed independently with CB-sized scratch. */
    #pragma omp parallel
    {
      LIBXSMM_ALIGNED(float a[CB], 64);
      LIBXSMM_ALIGNED(float b[CB], 64);
      LIBXSMM_ALIGNED(float c[CB], 64);
      LIBXSMM_ALIGNED(float ds[CB], 64);
      LIBXSMM_ALIGNED(float db[CB], 64);
      int np, cp;
      #pragma omp for collapse(2)
      for (np = 0; np < NP; np++){
        for (cp = 0; cp < CP; cp++) {
          int j, g, hwb, lg;
          libxsmm_matrix_eqn_param eqn_param;
          libxsmm_meltw_unary_param all_zero_param;
          libxsmm_matrix_arg arg_array[10];
          eqn_param.inputs = arg_array;

          /* Zero the accumulators this (np, cp) tile will update. */
          all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = ds;
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = db;
          all_zero_kernel(&all_zero_param);

          for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute a and b for each channel from group means and variance */
            lg = g - (cp*CB)/group_size;  /* group index local to this channel block */
            for(j = 0; j < group_size; j++){
              a[lg*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
              b[lg*group_size + j] = -a[lg*group_size + j]*mean[np*G + g];
            }
          }

          /* Argument slot numbers are fixed by the equations built in main. */
          arg_array[1].primary = a;
          arg_array[2].primary = b;
          arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
          arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[8].primary = ds;
          arg_array[9].primary = db;

          /* First sweep over HW tiles: accumulate ds, db, dgamma, dbeta. */
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = ds;
            ds_func(&eqn_param);
            eqn_param.output.primary = db;
            db_func(&eqn_param);
            eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
            dgamma_func(&eqn_param);
            eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
            dbeta_func(&eqn_param);
          }

          /* b = (db * mean[nb] - ds) * a * a * a * scale; */
          /* c = -b * mean[nb] - db * a * scale; */
          for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute b and c for each channel from group means and variance */
            lg = g - (cp*CB)/group_size;
            float gds = 0.0f;
            float gdb = 0.0f;
            for(j = 0; j < group_size; j++){ /* Group ds and db calculation */
              gds += ds[lg*group_size + j];
              gdb += db[lg*group_size + j];
            }
            for(j = 0; j < group_size; j++){
              b[lg*group_size + j] = (gdb * mean[np*G + g] - gds) * a[lg*group_size + j] * a[lg*group_size + j] * a[lg*group_size + j] * scale;
              c[lg*group_size + j] = -b[lg*group_size + j] * mean[np*G + g] - gdb * a[lg*group_size + j] * scale;
            }
          }

          /* Second sweep: din = dout * a * gamma + b * inp + c (per-channel a, b, c). */
          arg_array[1].primary = a;
          arg_array[2].primary = b;
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[7].primary = c;
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            din_func(&eqn_param);
          }
        }
      }

      /* Reduce the per-image partial gradients over NP (accumulates into the
         caller-provided dgamma/dbeta, which are not zeroed here). */
      #pragma omp for
      for (cp = 0; cp < CP; cp++) {
        for (np=0; np < NP; np++ ) {
          int cb;
          for(cb = 0; cb < CB; cb++){
            LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
            LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
          }
        }
      }
    }
  }
  else{
    /* A group spans multiple channel blocks: scratch must cover all CP*CB
       channels of one image, so parallelize over NP only. */
    #pragma omp parallel
    {
      LIBXSMM_ALIGNED(float a[CP*CB], 64);
      LIBXSMM_ALIGNED(float b[CP*CB], 64);
      LIBXSMM_ALIGNED(float c[CP*CB], 64);
      LIBXSMM_ALIGNED(float ds[CP*CB], 64);
      LIBXSMM_ALIGNED(float db[CP*CB], 64);
      int np;
      #pragma omp for
      for (np = 0; np < NP; np++) {
        int j, g, cp, hwb;
        libxsmm_matrix_eqn_param eqn_param;
        libxsmm_meltw_unary_param all_zero_param;
        libxsmm_matrix_arg arg_array[10];
        eqn_param.inputs = arg_array;

        for (cp = 0; cp < CP; cp++) {
          all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &ds[cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &db[cp*CB];
          all_zero_kernel(&all_zero_param);
        }

        for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
          for(j = 0; j < group_size; j++){
            a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
            b[g*group_size + j] = -a[g*group_size + j]*mean[np*G + g];
          }
        }

        /* First sweep over all channel blocks: accumulate ds, db, dgamma, dbeta. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
          arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[8].primary = &ds[cp*CB];
          arg_array[9].primary = &db[cp*CB];
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = &ds[cp*CB];
            ds_func(&eqn_param);
            eqn_param.output.primary = &db[cp*CB];
            db_func(&eqn_param);
            eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
            dgamma_func(&eqn_param);
            eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
            dbeta_func(&eqn_param);
          }
        }

        /* b = (db * mean[nb] - ds) * a * a * a * scale; */
        /* c = -b * mean[nb] - db * a * scale; */
        for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
          float gds = 0.0f;
          float gdb = 0.0f;
          for(j = 0; j < group_size; j++){ /* Group ds and db calculation */
            gds += ds[g*group_size + j];
            gdb += db[g*group_size + j];
          }
          for(j = 0; j < group_size; j++){
            b[g*group_size + j] = (gdb * mean[np*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
            c[g*group_size + j] = -b[g*group_size + j] * mean[np*G + g] - gdb * a[g*group_size + j] * scale;
          }
        }

        /* Second sweep: din = dout * a * gamma + b * inp + c. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[7].primary = &c[cp*CB];
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            din_func(&eqn_param);
          }
        }
      }

      /* Reduce per-image partial gradients over NP (accumulates with +=). */
      int cp;
      #pragma omp for
      for (cp = 0; cp < CP; cp++) {
        for (np=0; np < NP; np++ ) {
          int cb;
          for(cb = 0; cb < CB; cb++){
            LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
            LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
          }
        }
      }
    }
  }
}

/*
 * Backward pass of group normalization with BF16 activations/gradients.
 * Same structure as tpp_groupnorm_bwd_fp32 above; statistics (mean/var) and
 * parameter gradients (pdgamma/pdbeta) stay in FP32, while din/inp/dout/gamma
 * are BF16 (conversion is handled inside the JIT-ed equation kernels built in
 * main — presumably; confirm against the equation definitions).
 */
void tpp_groupnorm_bwd_bf16(long NP, long CP, long HW, long CB, long G, long num_HW_blocks,
                            libxsmm_bfloat16 *pdout, libxsmm_bfloat16 *pinp, float *mean, float *var,
                            libxsmm_bfloat16 *pgamma, libxsmm_bfloat16
*pdin, float *pdgamma, float *pdbeta,
                            libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func,
                            libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func,
                            libxsmm_matrix_eqn_function din_func, libxsmm_meltwfunction_unary all_zero_kernel,
                            libxsmm_meltwfunction_binary add_kernel, float eps) {

  int group_size;
  group_size = (CP*CB)/G;                              /* channels per group */
  const float scale = 1.0f / ((float)group_size*HW);   /* 1/N for one group's reduction */

  /* Activations/gradients are BF16; statistics and parameter gradients stay FP32. */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, din, pdin, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, inp, pinp, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dout, pdout, CP, HW, CB);
  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);

  /* Per-image partial parameter gradients; reduced over NP at the end. */
  LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
  LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);

  if (group_size <= CB){
    /* One or more whole groups fit inside a channel block: process each
       (np, cp) pair independently with CB-sized scratch. */
    #pragma omp parallel
    {
      LIBXSMM_ALIGNED(float a[CB], 64);
      LIBXSMM_ALIGNED(float b[CB], 64);
      LIBXSMM_ALIGNED(float c[CB], 64);
      LIBXSMM_ALIGNED(float ds[CB], 64);
      LIBXSMM_ALIGNED(float db[CB], 64);
      int np, cp;
      #pragma omp for collapse(2)
      for (np = 0; np < NP; np++){
        for (cp = 0; cp < CP; cp++) {
          int j, g, hwb, lg;
          libxsmm_matrix_eqn_param eqn_param;
          libxsmm_meltw_unary_param all_zero_param;
          libxsmm_matrix_arg arg_array[10];
          eqn_param.inputs = arg_array;

          /* Zero the accumulators this (np, cp) tile will update. */
          all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = ds;
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = db;
          all_zero_kernel(&all_zero_param);

          for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute a and b for each channel from group means and variance */
            lg = g - (cp*CB)/group_size;  /* group index local to this channel block */
            for(j = 0; j < group_size; j++){
              a[lg*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
              b[lg*group_size + j] = -a[lg*group_size + j]*mean[np*G + g];
            }
          }

          /* Argument slot numbers are fixed by the equations built in main. */
          arg_array[1].primary = a;
          arg_array[2].primary = b;
          arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
          arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[8].primary = ds;
          arg_array[9].primary = db;

          /* First sweep over HW tiles: accumulate ds, db, dgamma, dbeta. */
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = ds;
            ds_func(&eqn_param);
            eqn_param.output.primary = db;
            db_func(&eqn_param);
            eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
            dgamma_func(&eqn_param);
            eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
            dbeta_func(&eqn_param);
          }

          /* b = (db * mean[nb] - ds) * a * a * a * scale; */
          /* c = -b * mean[nb] - db * a * scale; */
          for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute b and c for each channel from group means and variance */
            lg = g - (cp*CB)/group_size;
            float gds = 0.0f;
            float gdb = 0.0f;
            for(j = 0; j < group_size; j++){ /* Group ds and db calculation */
              gds += ds[lg*group_size + j];
              gdb += db[lg*group_size + j];
            }
            for(j = 0; j < group_size; j++){
              b[lg*group_size + j] = (gdb * mean[np*G + g] - gds) * a[lg*group_size + j] * a[lg*group_size + j] * a[lg*group_size + j] * scale;
              c[lg*group_size + j] = -b[lg*group_size + j] * mean[np*G + g] - gdb * a[lg*group_size + j] * scale;
            }
          }

          /* Second sweep: din = dout * a * gamma + b * inp + c. */
          arg_array[1].primary = a;
          arg_array[2].primary = b;
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[7].primary = c;
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            din_func(&eqn_param);
          }
        }
      }

      /* Reduce per-image partial gradients over NP (accumulates with +=;
         outputs are not zeroed here). */
      #pragma omp for
      for (cp = 0; cp < CP; cp++) {
        for (np=0; np < NP; np++ ) {
          int cb;
          for(cb = 0; cb < CB; cb++){
            LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
            LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
          }
        }
      }
    }
  }
  else{
    /* A group spans multiple channel blocks: scratch covers all CP*CB channels
       of one image, so parallelize over NP only. */
    #pragma omp parallel
    {
      LIBXSMM_ALIGNED(float a[CP*CB], 64);
      LIBXSMM_ALIGNED(float b[CP*CB], 64);
      LIBXSMM_ALIGNED(float c[CP*CB], 64);
      LIBXSMM_ALIGNED(float ds[CP*CB], 64);
      LIBXSMM_ALIGNED(float db[CP*CB], 64);
      int np;
      #pragma omp for
      for (np = 0; np < NP; np++) {
        int j, g, cp, hwb;
        libxsmm_matrix_eqn_param eqn_param;
        libxsmm_meltw_unary_param all_zero_param;
        libxsmm_matrix_arg arg_array[10];
        eqn_param.inputs = arg_array;

        for (cp = 0; cp < CP; cp++) {
          all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &ds[cp*CB];
          all_zero_kernel(&all_zero_param);
          all_zero_param.out.primary = &db[cp*CB];
          all_zero_kernel(&all_zero_param);
        }

        for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
          for(j = 0; j < group_size; j++){
            a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
            b[g*group_size + j] = -a[g*group_size + j]*mean[np*G + g];
          }
        }

        /* First sweep over all channel blocks: accumulate ds, db, dgamma, dbeta. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
          arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[8].primary = &ds[cp*CB];
          arg_array[9].primary = &db[cp*CB];
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = &ds[cp*CB];
            ds_func(&eqn_param);
            eqn_param.output.primary = &db[cp*CB];
            db_func(&eqn_param);
            eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
            dgamma_func(&eqn_param);
            eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
            dbeta_func(&eqn_param);
          }
        }

        /* b = (db * mean[nb] - ds) * a * a * a * scale; */
        /* c = -b * mean[nb] - db * a * scale; */
        for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
          float gds = 0.0f;
          float gdb = 0.0f;
          for(j = 0; j < group_size; j++){ /* Group ds and db calculation */
            gds += ds[g*group_size + j];
            gdb += db[g*group_size + j];
          }
          for(j = 0; j < group_size; j++){
            b[g*group_size + j] = (gdb * mean[np*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
            c[g*group_size + j] = -b[g*group_size + j] * mean[np*G + g] - gdb * a[g*group_size + j] * scale;
          }
        }

        /* Second sweep: din = dout * a * gamma + b * inp + c. */
        for (cp = 0; cp < CP; cp++) {
          arg_array[1].primary = &a[cp*CB];
          arg_array[2].primary = &b[cp*CB];
          arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
          arg_array[7].primary = &c[cp*CB];
          for(hwb=0; hwb < num_HW_blocks; hwb++){
            arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
            din_func(&eqn_param);
          }
        }
      }

      /* Reduce per-image partial gradients over NP (accumulates with +=). */
      int cp;
      #pragma omp for
      for (cp = 0; cp < CP; cp++) {
        for (np=0; np < NP; np++ ) {
          int cb;
          for(cb = 0; cb < CB; cb++){
            LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
            LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
          }
        }
      }
    }
  }
}

/*
 * Scalar (non-TPP) reference implementation of the group-norm forward pass.
 * Used by main as the correctness baseline for the TPP variants above.
 * Computes per-group mean/var over a blocked [NP, CP, HW, CB] input and writes
 * y = ((s*x + b)*gamma + beta) to pout; mean/var ([NP, G]) are also outputs.
 */
void scaler_groupnorm_fwd_fp32(long NP, long CP, long HW, long CB, long G,
                               float *pinp, float *pgamma, float *pbeta,
                               float *mean, float *var, float *pout, float eps){

  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [NP, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float,
beta, pbeta, CB);

  int np, group_size;
  group_size = (CP*CB)/G;  /* channels per group */

  #pragma omp parallel for
  for(np = 0; np < NP; np++){
    LIBXSMM_ALIGNED(float sum_X[G], 64);
    LIBXSMM_ALIGNED(float sum_X2[G], 64);
    LIBXSMM_ALIGNED(float s[CP*CB], 64);
    LIBXSMM_ALIGNED(float b[CP*CB], 64);
    int i, j, cp, cb, hw, g;
    float m, v, value;

    for(g = 0; g < G; g++){
      sum_X[g] = 0.0f;
      sum_X2[g] = 0.0f;
    }

    /* Accumulate per-group sums of X and X^2. */
    for(cp = 0; cp < CP; cp++){ /* Size = CP*HW*CB*4 */
      m = 0.0f;
      v = 0.0f;
      if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
        /* Whole channel block belongs to one group: sum the block, then add it
           to that group's accumulator. */
        for(cb = 0; cb < CB; cb++){
          for(hw = 0; hw < HW; hw++){
            value = LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB);
            m += value;
            v += (value*value);
          }
        }
        g = (cp*CB)/group_size; /* determine current group */
        sum_X[g] += m;
        sum_X2[g] += v;
      }
      else{
        /* Several groups inside one channel block: accumulate each
           group_size-wide channel slice into its own group slot. */
        for(i=0; i < CB; i += group_size){ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
          for(j = 0; j < group_size; j++){
            for(hw = 0; hw < HW; hw++){
              value = LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, (i + j), CP, HW, CB);
              sum_X[cp*(CB/group_size) + (i/group_size)] += value;
              sum_X2[cp*(CB/group_size) + (i/group_size)] += (value*value);
            }
          }
        }
      }
    }

    /* Per-group statistics and per-channel scale/shift coefficients. */
    for(g = 0; g < G; g++){ /* mean and variance calculation */ /* Size = 2*CP*CB*4 */
      mean[np*G + g] = sum_X[g] / ((float)group_size * HW);
      var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 [G] */
      for(j = 0; j < group_size; j++){
        s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* s = 1/sqrt(var(X) + eps) [CP, CB] */
        b[g*group_size + j] = -1 * mean[np*G + g] * s[g*group_size + j]; /* b = -E[X]/sqrt(var(X) + eps) [CP, CB] */
      }
    }

    /* Normalize and apply affine parameters. */
    for(cp = 0; cp < CP; cp++){ /* Size = 2*CP*HW*CB*4 + 2*CP*CB*4 */
      for(cb = 0; cb < CB; cb++){
        for(hw = 0; hw < HW; hw++){
          value = LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB);
          value = ((value * s[cp*CB + cb]) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + LIBXSMM_VLA_ACCESS(2, beta, cp, cb, CB); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
          LIBXSMM_VLA_ACCESS(4, out, np, cp, hw, cb, CP, HW, CB) = value;
        }
      }
    }
  } /*End multithreading loop*/
}

/*
 * Scalar (non-TPP) reference implementation of the group-norm backward pass.
 * Computes din = dout * a * gamma + b * inp + c from the saved per-group
 * mean/var, and accumulates (+=) parameter gradients into pdgamma/pdbeta
 * (outputs are not zeroed here).
 */
void scaler_groupnorm_bwd_fp32(long NP, long CP, long HW, long CB, long G,
                               float *pdout, float *pinp, float *mean, float *var, float *pgamma,
                               float *pdin, float *pdgamma, float *pdbeta, float eps) {

  int np, group_size;
  group_size = (CP*CB)/G;                        /* channels per group */
  float scale = 1.0f / ((float)group_size * HW); /* 1/N for one group's reduction */

  LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);

  /* Per-image partial parameter gradients; reduced over NP at the end. */
  LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
  LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);

  #pragma omp parallel for
  for(np = 0; np < NP; np++){
    int j, cp, cb, hw, g;
    LIBXSMM_ALIGNED(float a[CP*CB], 64);
    LIBXSMM_ALIGNED(float b[CP*CB], 64);
    LIBXSMM_ALIGNED(float c[CP*CB], 64);
    LIBXSMM_ALIGNED(float ds[CP*CB], 64);
    LIBXSMM_ALIGNED(float db[CP*CB], 64);

    for(j = 0; j < CP*CB; j++){
      dgamma_NP[np*CP*CB + j] = 0.0f;
      dbeta_NP[np*CP*CB + j] = 0.0f;
    }

    for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
      for(j = 0; j < group_size; j++){
        a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
        b[g*group_size + j] = -a[g*group_size + j]*mean[np*G + g];
        ds[g*group_size + j] = 0.0f;
        db[g*group_size + j] = 0.0f;
      }
    }

    /* First sweep: accumulate the four reductions channel by channel. */
    for (cp = 0; cp < CP; cp++) { /* dgamma += (a * inp + b) * dout , dbeta += dout, ds += dout * gamma * inp, db += dout * gamma */ /* Size = 2*CP*HW*CB*4 */
      for (cb = 0; cb < CB; cb++) {
        for (hw = 0; hw < HW; hw++){
          dgamma_NP[np*CP*CB + cp*CB + cb] += (a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB);
          dbeta_NP[np*CP*CB + cp*CB + cb] += LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB);
ds[cp*CB + cb] += LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) * LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB);
          db[cp*CB + cb] += LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB);
        }
      }
    }

    /* b = (db * mean[nb] - ds) * a * a * a * scale; */
    /* c = -b * mean[nb] - db * a * scale; */
    for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
      float gds = 0.0f;
      float gdb = 0.0f;
      for(j = 0; j < group_size; j++){ /* Group ds and db calculation */
        gds += ds[g*group_size + j];
        gdb += db[g*group_size + j];
      }
      for(j = 0; j < group_size; j++){
        b[g*group_size + j] = (gdb * mean[np*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
        c[g*group_size + j] = -b[g*group_size + j] * mean[np*G + g] - gdb * a[g*group_size + j] * scale;
      }
    }

    /* Second sweep: input gradient from the per-channel coefficients. */
    for (cp = 0; cp < CP; cp++) { /* din = dout * a * gamma + b * inp + c */ /* Size = 3*CP*HW*CB*4 */
      for (cb = 0; cb < CB; cb++) {
        for (hw = 0; hw < HW; hw++){
          LIBXSMM_VLA_ACCESS(4, din, np, cp, hw, cb, CP, HW, CB) = LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB) * a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + b[cp*CB + cb] * LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB) + c[cp*CB + cb];
        }
      }
    }
  }

  /* Reduce per-image partial gradients over NP (accumulates with +=;
     outputs are not zeroed here). */
  int cp;
  #pragma omp parallel for
  for (cp = 0; cp < CP; cp++) {
    for (np=0; np < NP; np++ ) {
      int cb;
      for(cb = 0; cb < CB; cb++){
        LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
        LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
      }
    }
  }
}

/* Benchmark driver: builds the JIT kernels/equations, checks TPP results
   against the scalar reference, and times both. */
int main( int argc, char* argv[] ) {
  libxsmm_blasint my_eqn10, my_eqn11, my_eqn12, my_eqn13, my_eqn14, my_eqn15;
  libxsmm_matrix_eqn_function func10, func11, func12, func13, func14, func15;
  libxsmm_meltw_unary_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  libxsmm_meltw_unary_type unary_type;
  libxsmm_meltwfunction_unary reduce_rows_kernel, reduce_HW_kernel,
reduce_groups_kernel; const float eps = FLT_EPSILON; libxsmm_blasint i, it, ld, tmp_ld, tmp_ld2; unsigned long long l_start, l_end; double l_total = 0, l_total2 = 0; double t_vec = 0, t_tpp = 0; libxsmm_matdiff_info norms_out; float *inp, *out, *dinp, *dout, *eqn_dinp, *eqn_dout, *dbeta, *eqn_dbeta, *dgamma, *eqn_dgamma, *eqn_out, *gamma, *beta, *cache_fl, *mean, *var; libxsmm_bfloat16 *bf16_inp, *bf16_out, *bf16_dinp, *bf16_dout, *bf16_eqn_dinp, *bf16_eqn_dout, *bf16_gamma, *bf16_beta, *bf16_eqn_out; int NP = 28; int CP = 2; int HW = 784; int CB = 64; int G = 1; long num_HW_blocks = 16; int datatype_mode = 0; int iters = 100; libxsmm_datatype in_dt = LIBXSMM_DATATYPE_F32; libxsmm_datatype out_dt = LIBXSMM_DATATYPE_F32; if ( argc > 1 ) NP = atoi(argv[1]); if ( argc > 2 ) CP = atoi(argv[2]); if ( argc > 3 ) HW = atoi(argv[3]); if ( argc > 4 ) CB = atoi(argv[4]); if ( argc > 5 ) G = atoi(argv[5]); if ( argc > 6 ) num_HW_blocks = atoi(argv[6]); if ( argc > 7 ) datatype_mode = atoi(argv[7]); if ( argc > 8 ) iters = atoi(argv[8]); if (datatype_mode == 0) { in_dt = LIBXSMM_DATATYPE_F32; out_dt = LIBXSMM_DATATYPE_F32; } else if (datatype_mode == 1) { in_dt = LIBXSMM_DATATYPE_BF16; out_dt = LIBXSMM_DATATYPE_BF16; } else { printf("ERROR: Supporting only FP32 and BF16 precisions...\n"); } inp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152); out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152); dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152); dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152); dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); eqn_dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152); eqn_dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152); eqn_dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); eqn_dbeta 
= (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); gamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); beta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152); mean = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*G, 2097152); var = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*G, 2097152); eqn_out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152); cache_fl = (float*) libxsmm_aligned_malloc( sizeof(float)*1024*1024, 2097152); bf16_inp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152); bf16_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152); bf16_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152); bf16_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152); bf16_eqn_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152); bf16_eqn_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152); bf16_gamma = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152); bf16_beta = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152); bf16_eqn_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152); libxsmm_init(); libxsmm_matdiff_clear(&norms_out); /* Initializing arrays */ for ( i = 0; i < NP*CP*HW*CB; ++i ) { inp[i] = (float)libxsmm_rng_f64(); out[i] = (float)libxsmm_rng_f64(); eqn_out[i] = out[i]; dinp[i] = (float)libxsmm_rng_f64(); dout[i] = (float)libxsmm_rng_f64(); eqn_dinp[i] = dinp[i]; eqn_dout[i] = dout[i]; libxsmm_rne_convert_fp32_bf16( &inp[i], &bf16_inp[i], 1 ); libxsmm_rne_convert_fp32_bf16( &out[i], &bf16_out[i], 1 ); libxsmm_rne_convert_fp32_bf16( &eqn_out[i], &bf16_eqn_out[i], 1 ); libxsmm_rne_convert_fp32_bf16( &dout[i], 
&bf16_dout[i], 1 ); libxsmm_rne_convert_fp32_bf16( &eqn_dout[i], &bf16_eqn_dout[i], 1 ); libxsmm_rne_convert_fp32_bf16( &dinp[i], &bf16_dinp[i], 1 ); libxsmm_rne_convert_fp32_bf16( &eqn_dinp[i], &bf16_eqn_dinp[i], 1 ); } for ( i = 0; i < CP*CB; ++i ) { gamma[i] = (float)libxsmm_rng_f64(); beta[i] = (float)libxsmm_rng_f64(); dbeta[i] = (float)libxsmm_rng_f64(); dgamma[i] = (float)libxsmm_rng_f64(); eqn_dbeta[i] = dbeta[i]; eqn_dgamma[i] = dgamma[i]; libxsmm_rne_convert_fp32_bf16( &gamma[i], &bf16_gamma[i], 1 ); libxsmm_rne_convert_fp32_bf16( &beta[i], &bf16_beta[i], 1 ); } for (i = 0; i < 1024 * 1024; i++ ) { cache_fl[i] = (float)libxsmm_rng_f64(); } libxsmm_blasint ldo = G; libxsmm_meltwfunction_unary all_zero_G_kernel = libxsmm_dispatch_meltw_unary(G, 1, NULL, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( all_zero_G_kernel == NULL) { fprintf( stderr, "JIT for initialization by unary all zero group copy kernel failed. Bailing...!\n"); exit(-1); } ldo = CB; libxsmm_meltwfunction_unary all_zero_kernel = libxsmm_dispatch_meltw_unary(CB, 1, NULL, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR); if ( all_zero_G_kernel == NULL) { fprintf( stderr, "JIT for initialization by unary all zero copy kernel failed. Bailing...!\n"); exit(-1); } libxsmm_meltwfunction_unary copy_kernel = libxsmm_dispatch_meltw_unary(CB, 1, &ldo, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY); if ( copy_kernel == NULL) { fprintf( stderr, "JIT for initialization by copy kernel failed. 
Bailing...!\n"); exit(-1); } /* TPPs for reducing X and X2 in HW*/ ld = CB; tmp_ld = CB; unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD; jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS; reduce_HW_kernel = libxsmm_dispatch_meltw_unary(CB, HW/num_HW_blocks, &ld, &tmp_ld, in_dt, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type); libxsmm_blasint group_size = (CP*CB)/G; libxsmm_meltwfunction_binary add_kernel = libxsmm_dispatch_meltw_binary(CB, 1, &ld, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_MELTW_TYPE_BINARY_ADD); if ( add_kernel == NULL) { fprintf( stderr, "JIT for initialization of add kernel failed. Bailing...!\n"); exit(-1); } /* TPP for reducing groups */ ld = group_size; /* group_size = (CP*CB)/G */ tmp_ld = 1; unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD; jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS; reduce_groups_kernel = libxsmm_dispatch_meltw_unary(group_size, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type); ld = CB; tmp_ld = 1; unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD; jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS; reduce_rows_kernel = libxsmm_dispatch_meltw_unary(CB, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type); /* TPP for foward */ ld = CB; tmp_ld = 1; tmp_ld2 = 1; my_eqn10 = libxsmm_matrix_eqn_create(); /* y = (s*x + b)*gamma + beta */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32); libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | 
LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32); libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* x = [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 1, 0, LIBXSMM_DATATYPE_F32 ); /* s = [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b = [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 3, 0, in_dt ); /* gamma = [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 4, 0, in_dt ); /* beta = [CB] */ func10 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, out_dt, my_eqn10 ); /* y = [HW, CB] */ /* Check correctness */ if (datatype_mode == 0) { scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps); tpp_groupnorm_fwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps); } else if (datatype_mode == 1) { scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps); tpp_groupnorm_fwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps); for ( i = 0; i < NP*CP*HW*CB; ++i ) { /* out[i] = upconvert_bf16(bf16_out[i]); */ eqn_out[i] = upconvert_bf16(bf16_eqn_out[i]); } } /* compare */ printf("############################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 FWD Groupnorm - Output #\n"); } else { printf("# Correctness BF16 FWD Groupnorm - Output #\n"); } printf("############################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*HW*CB, 1, out, eqn_out, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error 
: %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); if (datatype_mode == 0) { scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Unit time FWD = %.5g\n", ((double)(l_total))); tpp_groupnorm_fwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_fwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP groupnorm time FWD = %.5g\n", ((double)(l_total2))); printf("Speedup FWD is %.5g\n", l_total/l_total2); } else if (datatype_mode == 1) { scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Scaler FP32 groupnorm time FWD = %.5g\n", ((double)(l_total))); tpp_groupnorm_fwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, 
eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_fwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP BF16 groupnorm time FWD = %.5g\n", ((double)(l_total2))); printf("Speedup FWD is %.5g\n", l_total/l_total2); } t_tpp = l_total2; t_vec = l_total; /* Group norm equations */ /* Create MatEq for bwd layernorm */ ld = CB; tmp_ld2 = 1; /* dgamma function */ my_eqn11 = libxsmm_matrix_eqn_create(); /* dgamma = ((inp *a + b) * dout) + dgamma */ libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* dgamma = ((inp *a + b) * dout) + dgamma */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn11, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* ((inp *a + b) * dout) */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn11, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32); libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 4, 0, LIBXSMM_DATATYPE_F32 ); /* 
dgamma [CB] */ func11 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn11 ); /* dgamma [CB] */ /* dbeta function */ my_eqn12 = libxsmm_matrix_eqn_create(); /* dbeta [CB] = dout [HW, CB] + dbeta [CB] */ libxsmm_matrix_eqn_push_back_binary_op( my_eqn12, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* dbeta_tmp [HW, CB] */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn12, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, 1, 1, 5, 0, LIBXSMM_DATATYPE_F32 ); /* dbeta [CB] */ func12 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn12 ); /* dbeta [CB] */ /* db new equation */ my_eqn13 = libxsmm_matrix_eqn_create(); /* db [CB] = (dout * gamma) [HW, CB] + db [CB]*/ libxsmm_matrix_eqn_push_back_binary_op(my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* db [CB] */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn13, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_binary_op( my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 9, 0, LIBXSMM_DATATYPE_F32 ); /* db [CB] */ func13 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn13 ); /* db [CB] */ /* ds new equation */ my_eqn14 = libxsmm_matrix_eqn_create(); /* ds [CB] = ((dout * gamma) * inp) [HW, CB] + ds [CB] */ 
libxsmm_matrix_eqn_push_back_binary_op(my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */ libxsmm_matrix_eqn_push_back_unary_op(my_eqn14, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */ libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 ); /*(dout * gamma)*/ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 8, 0, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */ func14 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn14 ); /* ds [CB] */ /* din equation */ my_eqn15 = libxsmm_matrix_eqn_create(); /* din = ((gamma * a) * dout) + (inp * b + c) */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32); libxsmm_matrix_eqn_push_back_binary_op( my_eqn15, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */ libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | 
LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32); libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */ libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 7, 0, LIBXSMM_DATATYPE_F32 ); /* c [CB] */ func15 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, in_dt, my_eqn15 ); /* din [HW, CB] */ if (datatype_mode == 0) { scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); tpp_groupnorm_bwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps); } else if (datatype_mode == 1) { scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); tpp_groupnorm_bwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps); for ( i = 0; i < NP*CP*HW*CB; ++i ) { /* dinp[i] = upconvert_bf16(bf16_dinp[i]); */ eqn_dinp[i] = upconvert_bf16(bf16_eqn_dinp[i]); } } /* compare */ printf("############################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 BWD Groupnorm - Dinput #\n"); } else { printf("# Correctness BF16 BWD Groupnorm - Dinput #\n"); } printf("############################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*HW*CB, 1, dinp, eqn_dinp, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", 
norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); printf("###########################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 BWD Groupnorm - Dbeta #\n"); } else { printf("# Correctness BF16 BWD Groupnorm - Dbeta #\n"); } printf("###########################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dbeta, eqn_dbeta, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); printf("############################################\n"); if (datatype_mode == 0) { printf("# Correctness FP32 BWD Groupnorm - Dgamma #\n"); } else { printf("# Correctness BF16 BWD Groupnorm - Dgamma #\n"); } printf("############################################\n"); libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dgamma, eqn_dgamma, 0, 0); printf("L1 reference : %.25g\n", norms_out.l1_ref); printf("L1 test : %.25g\n", norms_out.l1_tst); printf("L2 abs.error : %.24f\n", norms_out.l2_abs); printf("L2 rel.error : %.24f\n", norms_out.l2_rel); printf("Linf abs.error: %.24f\n", norms_out.linf_abs); printf("Linf rel.error: %.24f\n", norms_out.linf_rel); printf("Check-norm : %.24f\n\n", norms_out.normf_rel); if (datatype_mode == 0) { scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Scaler groupnorm time BWD = %.5g\n", ((double)(l_total))); tpp_groupnorm_bwd_fp32(NP, CP, HW, CB, G, 
num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_bwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP groupnorm time BWD = %.5g\n", ((double)(l_total2))); printf("Speedup BWD is %.5g\n", l_total/l_total2); } else if (datatype_mode == 1) { scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps); } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("Scaler FP32 groupnorm time BWD = %.5g\n", ((double)(l_total))); tpp_groupnorm_bwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps); l_start = libxsmm_timer_tick(); for (it = 0; it < iters; it++) { tpp_groupnorm_bwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps); } l_end = libxsmm_timer_tick(); l_total2 = libxsmm_timer_duration(l_start, l_end); printf("TPP BF16 groupnorm time BWD = %.5g\n", ((double)(l_total2))); printf("Speedup BWD is %.5g\n", l_total/l_total2); } /* printf("Running sum is %.5f\n", sum); */ t_tpp += l_total2; t_vec += l_total; printf("\n\n=================================\n"); printf("Total Speedup via TPP Matrix equation is %.5g\n", t_vec/t_tpp); printf("=================================\n"); 
libxsmm_free(inp); libxsmm_free(out); libxsmm_free(dinp); libxsmm_free(dout); libxsmm_free(eqn_dinp); libxsmm_free(eqn_dout); libxsmm_free(bf16_dinp); libxsmm_free(bf16_dout); libxsmm_free(bf16_eqn_dinp); libxsmm_free(bf16_eqn_dout); libxsmm_free(dgamma); libxsmm_free(dbeta); libxsmm_free(eqn_dgamma); libxsmm_free(eqn_dbeta); libxsmm_free(mean); libxsmm_free(var); libxsmm_free(gamma); libxsmm_free(beta); libxsmm_free(eqn_out); libxsmm_free(bf16_inp); libxsmm_free(bf16_out); libxsmm_free(bf16_gamma); libxsmm_free(bf16_beta); libxsmm_free(bf16_eqn_out); libxsmm_free(cache_fl); return 0; }
traverse.h
#ifndef traverse_eager_h
#define traverse_eager_h
#include "exafmm.h"
#include "kernel.h"
namespace exafmm {
  //! Recursive call to post-order tree traversal for upward pass.
  //! Children are completed first (taskwait), then this cell's multipole is
  //! built: P2M for leaves; M2M is applied to every cell unconditionally
  //! (for a leaf its child loop is presumably empty -- confirm in kernel.h).
  void upwardPass(Cell * Ci) {
    for (Cell * Cj=Ci->child; Cj!=Ci->child+Ci->numChilds; Cj++) {
      // spawn a task only for sufficiently large subtrees (> 100 bodies)
#pragma omp task untied if(Cj->numBodies > 100)
      upwardPass(Cj);
    }
#pragma omp taskwait
    if(Ci->numChilds==0) P2M(Ci);
    M2M(Ci);
  }

  //! Upward pass interface: starts the task-parallel recursion at the root.
  void upwardPass(Cells & cells) {
#pragma omp parallel
#pragma omp single nowait
    upwardPass(&cells[0]);
  }

  //! Upward pass to fill in missing numBodies and M
  //! (used for a local essential tree whose multipoles may be partly unset).
  void upwardPassLET(Cell * Ci) {
    for (Cell * Cj=Ci->child; Cj!=Ci->child+Ci->numChilds; Cj++) {
#pragma omp task untied if(Cj->numBodies > 100)
      upwardPassLET(Cj);
    }
#pragma omp taskwait
    // L1 norm of the multipole coefficients; near zero means "never filled in"
    real_t M = 0;
    for (int n=0; n<NTERM; n++) M += std::abs(Ci->M[n]);
    if (Ci->numChilds==0) {
      if (M < EPS) {
        P2M(Ci);  // leaf with an empty expansion: build it from its own bodies
      }
    } else {
      // internal cell: rebuild the expansion from the (now finished) children
      for (int n=0; n<NTERM; n++) Ci->M[n] = 0;
      M2M(Ci);
    }
  }

  //! Upward pass LET interface
  void upwardPassLET(Cells & cells) {
#pragma omp parallel
#pragma omp single nowait
    upwardPassLET(&cells[0]);
  }

  //! Recursive call to dual tree traversal for horizontal pass.
  //! Applies the multipole acceptance criterion (MAC): well-separated pairs
  //! interact via M2L, leaf-leaf pairs via P2P, otherwise one cell is split.
  void horizontalPass(Cell * Ci, Cell * Cj) {
    vec3 dX;
    // distance vector, shifted by the current periodic image offset IX
    for (int d=0; d<3; d++) dX[d] = Ci->X[d] - Cj->X[d] - IX[d] * CYCLE;
    real_t R2 = norm(dX) * THETA * THETA;
    if (R2 > (Ci->R + Cj->R) * (Ci->R + Cj->R)) {
      M2L(Ci, Cj);                                    // well separated
    } else if (Ci->numChilds == 0 && Cj->numChilds == 0) {
      if (Cj->numBodies == 0) {
        // source leaf holds no local bodies (remote/LET case): fall back to
        // M2L, which must then still satisfy the MAC within a 10% margin
        assert((Ci->R+Cj->R)/std::sqrt(norm(dX)) < THETA*1.1);
        M2L(Ci, Cj);
      } else {
        P2P(Ci, Cj);                                  // leaf-leaf: direct sum
      }
    } else if (Cj->numChilds == 0 || (Ci->R >= Cj->R && Ci->numChilds != 0)) {
      // split the target cell; IX must travel with each spawned task
      for (Cell * ci=Ci->child; ci!=Ci->child+Ci->numChilds; ci++) {
#pragma omp task untied if(ci->numBodies > 100) firstprivate(IX)
        horizontalPass(ci, Cj);
      }
    } else {
      // split the source cell (serially; tasks are spawned on the target side)
      for (Cell * cj=Cj->child; cj!=Cj->child+Cj->numChilds; cj++) {
        horizontalPass(Ci, cj);
      }
    }
#pragma omp taskwait
  }

  //! Horizontal pass for periodic images.
  //! Builds successively coarser 3x3x3 "super cells" of images per level and
  //! accumulates their far-field contribution on the root via M2L.
  //! NOTE(review): mutates the globals IX and CYCLE (CYCLE *= 3 per level);
  //! the caller is expected to restore CYCLE afterwards.
  void periodic(Cell * Ci0, Cell * Cj0) {
    Cells pcells(27);
    for (size_t c=0; c<pcells.size(); c++) {
      pcells[c].M.resize(NTERM, 0.0);
      pcells[c].L.resize(NTERM, 0.0);
    }
    // the last pcell is the aggregate "image" cell; the first 26 are children
    Cell * Ci = &pcells.back();
    *Ci = *Cj0;
    Ci->child = &pcells[0];
    Ci->numChilds = 26;
    for (int level=0; level<IMAGES-1; level++) {
      // M2L from every non-central image of the current level onto the root
      for (int ix=-1; ix<=1; ix++) {
        for (int iy=-1; iy<=1; iy++) {
          for (int iz=-1; iz<=1; iz++) {
            if (ix != 0 || iy != 0 || iz != 0) {
              for (int cx=-1; cx<=1; cx++) {
                for (int cy=-1; cy<=1; cy++) {
                  for (int cz=-1; cz<=1; cz++) {
                    IX[0] = ix * 3 + cx;
                    IX[1] = iy * 3 + cy;
                    IX[2] = iz * 3 + cz;
                    M2L(Ci0, Ci);
                  }
                }
              }
            }
          }
        }
      }
      // populate the 26 child image cells for the next (3x coarser) level
      Cell * Cj = &pcells[0];
      for (int ix=-1; ix<=1; ix++) {
        for (int iy=-1; iy<=1; iy++) {
          for (int iz=-1; iz<=1; iz++) {
            if (ix != 0 || iy != 0 || iz != 0) {
              Cj->X[0] = Ci->X[0] + ix * CYCLE;
              Cj->X[1] = Ci->X[1] + iy * CYCLE;
              Cj->X[2] = Ci->X[2] + iz * CYCLE;
              Cj->M = Ci->M;
              Cj++;
            }
          }
        }
      }
      M2M(Ci);     // aggregate the 26 images into the coarser super cell
      CYCLE *= 3;  // next level covers a 3x larger box
    }
  }

  //! Horizontal pass interface: non-periodic single traversal, or one
  //! traversal per neighbor image plus the periodic far field.
  void horizontalPass(Cells & icells, Cells & jcells) {
#pragma omp parallel
#pragma omp single
    if (IMAGES == 0) {
      horizontalPass(&icells[0], &jcells[0]);
    } else {
      for (IX[0]=-1; IX[0]<=1; IX[0]++) {
        for (IX[1]=-1; IX[1]<=1; IX[1]++) {
          for (IX[2]=-1; IX[2]<=1; IX[2]++) {
            horizontalPass(&icells[0], &jcells[0]);
          }
        }
      }
      // periodic() multiplies CYCLE per level; save and restore it here
      real_t saveCycle = CYCLE;
      periodic(&icells[0], &jcells[0]);
      CYCLE = saveCycle;
    }
  }

  //! Recursive call to pre-order tree traversal for downward pass.
  void downwardPass(Cell * Cj) {
    L2L(Cj);                        // pull the parent's local expansion down
    if (Cj->numChilds==0) L2P(Cj);  // leaves evaluate locals at their bodies
    for (Cell * Ci=Cj->child; Ci!=Cj->child+Cj->numChilds; Ci++) {
#pragma omp task untied if(Ci->numBodies > 100)
      downwardPass(Ci);
    }
#pragma omp taskwait
  }

  //! Downward pass interface
  void downwardPass(Cells & cells) {
#pragma omp parallel
#pragma omp single nowait
    downwardPass(&cells[0]);
  }

  //! Direct summation over all periodic images (reference for verification).
  void direct(Bodies & bodies, Bodies & jbodies) {
    Cells cells(2);
    Cell * Ci = &cells[0];
    Cell * Cj = &cells[1];
    Ci->body = &bodies[0];
    Ci->numBodies = bodies.size();
    Cj->body = &jbodies[0];
    Cj->numBodies = jbodies.size();
    // (3^IMAGES - 1) / 2 image boxes on each side of the original box
    int prange = (std::pow(3,IMAGES) - 1) / 2;
    for (int ix=-prange; ix<=prange; ix++) {
      for (int iy=-prange; iy<=prange; iy++) {
        for (int iz=-prange; iz<=prange; iz++) {
          IX[0] = ix;
          IX[1] = iy;
          IX[2] = iz;
          P2P(Ci, Cj);
        }
      }
    }
  }
}
#endif
Pragma.h
//===- Pragma.h - Pragma registration and handling --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PragmaHandler and PragmaTable interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LEX_PRAGMA_H
#define LLVM_CLANG_LEX_PRAGMA_H

#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <string>

namespace clang {

class PragmaNamespace;
class Preprocessor;
class Token;

/**
 * Describes how the pragma was introduced, e.g., with \#pragma,
 * _Pragma, or __pragma.
 */
enum PragmaIntroducerKind {
  /**
   * The pragma was introduced via \#pragma.
   */
  PIK_HashPragma,

  /**
   * The pragma was introduced via the C99 _Pragma(string-literal).
   */
  PIK__Pragma,

  /**
   * The pragma was introduced via the Microsoft
   * __pragma(token-string).
   */
  PIK___pragma
};

/// PragmaHandler - Instances of this interface defined to handle the various
/// pragmas that the language front-end uses.  Each handler optionally has a
/// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with
/// that identifier is found.  If a handler does not match any of the declared
/// pragmas the handler with a null identifier is invoked, if it exists.
///
/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
/// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other
/// pragmas.
class PragmaHandler {
  // Identifier this handler responds to; an empty name acts as the
  // "catch-all" handler within its namespace.
  std::string Name;

public:
  PragmaHandler() = default;
  explicit PragmaHandler(StringRef name) : Name(name) {}
  virtual ~PragmaHandler();

  StringRef getName() const { return Name; }

  /// Callback invoked by the preprocessor when a pragma matching this
  /// handler's name is encountered; FirstToken is the token after the name.
  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken) = 0;

  /// getIfNamespace - If this is a namespace, return it.  This is equivalent to
  /// using a dynamic_cast, but doesn't require RTTI.
  virtual PragmaNamespace *getIfNamespace() { return nullptr; }
};

/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas.
class EmptyPragmaHandler : public PragmaHandler {
public:
  explicit EmptyPragmaHandler(StringRef Name = StringRef());

  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override;
};

/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined.  Common examples of namespaces
/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
  /// Handlers - This is a map of the handlers in this namespace with their name
  /// as key.
  llvm::StringMap<PragmaHandler *> Handlers;

public:
  explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
  ~PragmaNamespace() override;

  /// FindHandler - Check to see if there is already a handler for the
  /// specified name.  If not, return the handler for the null name if it
  /// exists, otherwise return null.  If IgnoreNull is true (the default) then
  /// the null handler isn't returned on failure to match.
  PragmaHandler *FindHandler(StringRef Name, bool IgnoreNull = true) const;

  /// AddPragma - Add a pragma to this namespace.
  /// NOTE(review): ownership semantics (who deletes Handler) are not visible
  /// here -- see the ~PragmaNamespace implementation.
  void AddPragma(PragmaHandler *Handler);

  /// RemovePragmaHandler - Remove the given handler from the
  /// namespace.
  void RemovePragmaHandler(PragmaHandler *Handler);

  bool IsEmpty() const { return Handlers.empty(); }

  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override;

  PragmaNamespace *getIfNamespace() override { return this; }
};

} // namespace clang

#endif // LLVM_CLANG_LEX_PRAGMA_H
OmpForBeginLink.c
/* Compile/link smoke test: an orphaned OpenMP worksharing loop inside main.
 * The loop body is intentionally empty; only building the construct matters. */
int main() {
  #pragma omp for
  for (int idx = 0; idx < 10; idx++) {
    /* empty on purpose */
  }
}
GB_unaryop__identity_uint16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint16_fp32
// op(A') function:  GB_tran__identity_uint16_fp32

// C type:   uint16_t
// A type:   float
// cast:     uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop:  cij = aij

// The macros below parameterize the generic templates included at the end of
// each function; they define the identity operator with a float -> uint16_t
// typecast.

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_uint16_fp32
(
    uint16_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise and embarrassingly parallel: each thread casts a static
    // chunk of the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the included template, driven by the
    // GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
apriori.c
/*Copyright(C) 2016 João Saffran <joaosaffran@gmail.com> * * apriori.c - Apriori Methods Implementation */ #include <assert.h> #include "apriori.h" /* * Checks if has combination in an item of the database */ static inline int has_item_in_line(const struct Item *it, char *line) { int resp = 1; char *token = malloc(LINE_MAX*sizeof(char)); int aux[it->num_itens]; size_t j = 0; memset(aux, 0, it->num_itens*sizeof(int)); memset(token, 0, LINE_MAX*sizeof(char)); for (int i = 0; i < it->num_itens; i++) { for (int k = 0; ((it->item[j] != (',')) && (j < strlen(it->item))); k++, j++) { token[k] = it->item[j]; } ++j; if (strstr(line, token) != NULL) { aux[i] = 1; } } free(token); for (int i = 0; i < it->num_itens; i++) { resp = (resp && aux[i]); } return (resp); } /* * Counts the frequency of the combinations specified. */ void count_combinations_frequency(int ini_pos, int end_pos) { #pragma omp parallel for for (int i = 0; i< DATABASE_SIZE; i++) { for (int j = ini_pos; j < end_pos; j++) { if (has_item_in_line(&data[j], database[i])) { ++data[j].frequency; } } } } /* * Determines if items are frequent. */ void determinate_frequents(struct Item *dataset, int begin, int end, int threshold) { for (int i = begin; i< end; i++) { dataset[i].isFrequent = (dataset[i].frequency >= threshold); } } /* * Combines two databases itens. 
*/ void combine(struct Item *dataset1, int size1, struct Item *dataset2, int size2, int num_threads) { struct Item tmp_data[10000*num_threads][num_threads]; int aux_lpi[num_threads]; memset(aux_lpi, 0, num_threads * sizeof(int)); #pragma omp parallel { int tid = omp_get_thread_num(); int lpi = 0; char *str; str = malloc( LINE_MAX*sizeof(char) ); assert(str != NULL); #pragma omp for for (int i = 0; i < size1; i++) { if (!dataset1[i].isFrequent) { continue; } for (int j = 0; j < size2; j++) { if (!dataset2[j].isFrequent) { continue; } if (strcmp(dataset1[i].item,dataset2[j].item) < 0) { strcat(str, dataset1[i].item); strcat(str, ","); strcat(str, dataset2[j].item); } else { strcat(str, dataset2[j].item); strcat(str, ","); strcat(str, dataset1[i].item); } tmp_data[lpi][tid].num_itens = dataset1[i].num_itens + 1; tmp_data[lpi++][tid].item = str; str = NULL; free(str); str = malloc( LINE_MAX * sizeof(char) ); } } free(str); aux_lpi[tid] = lpi; } for(int i = 0; i< num_threads; i++) { for(int j = 0; j< aux_lpi[i];j++) { data[pos_to_insert++] = tmp_data[j][i]; } } }
GB_binop__land_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_08__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_04__land_bool)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_bool)
// A*D function (colscale):         GB (_AxD__land_bool)
// D*A function (rowscale):         GB (_DxB__land_bool)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_bool)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_bool)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_bool)
// C=scalar+B                       GB (_bind1st__land_bool)
// C=scalar+B'                      GB (_bind1st_tran__land_bool)
// C=A+scalar                       GB (_bind2nd__land_bool)
// C=A'+scalar                      GB (_bind2nd_tran__land_bool)

// C type:   bool
// A type:   bool
// A pattern? 0
// B type:   bool
// B pattern? 0

// BinaryOp: cij = (aij && bij)

// The macros below parameterize the generic templates included inside each
// function; together they instantiate the boolean LAND operator.

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    bool aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    bool bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x && y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_BOOL || GxB_NO_LAND_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LAND is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *)
C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_bool) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; bool alpha_scalar ; bool beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((bool *) alpha_scalar_in)) ; beta_scalar = (*((bool *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__land_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool 
Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__land_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
bnz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = (x && bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = GBX (Ax, p, false) ; Cx [p] = (aij && y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x && aij) ; \ } GrB_Info GB (_bind1st_tran__land_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij && y) ; \ } GrB_Info GB (_bind2nd_tran__land_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
computePDF.c
/************** computePDF.c Functions used to compute the PDFs for several dimensionalities. Copyright (c) 2014, Unai Lopez-Novoa, Jon Saenz, Alexander Mendiburu and Jose Miguel-Alonso (from Universidad del Pais Vasco/Euskal Herriko Unibertsitatea) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Universidad del Pais Vasco/Euskal Herriko Unibertsitatea nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************/

#include "computePDF.h"
#include "linalg.h"

/* Volume of the unit d-ball, used as the kernel normalization constant:
 * 2 (d=1), pi (d=2), 4*pi/3 (d=3); higher dimensions delegate to
 * unit_sphere_volume() from linalg.h. */
double volumeConstant(int dim)
{
	if(dim == 1) return 2.;
	else if(dim == 2) return acos(-1.);
	else if (dim == 3) return acos(-1.)*4./3.;
	else return unit_sphere_volume(dim);
}

/**** Functions to calculate the PDF of a defined 2D space (box) for a given sample. ****/

//Compute the density in the bounding box of a sample - Function for 2D spaces
// pdf:  output PDF grid (accumulated in place)
// PC:   current sample expressed in PC (principal-component) space
// lower/tot_ev_per_dim: lower corner and gridpoint counts of the bounding box
// x0/dx: grid origin and spacing; h2: squared bandwidth; cd: volume constant
// densValues/densPosition: per-thread scratch (OpenMP build only)
// NOTE(review): __assume_aligned and "#pragma simd" are Intel-compiler
// extensions; this file presumably targets icc — confirm before porting.
void compute2DBox_2D(PDFPtr pdf, double * PC, double * lower, int * tot_ev_per_dim, double * gridpoint, size_t * dif_pos, double * x0, double * dx, double h2, double cd, MAT * eigenvectors, double * restrict densValues, int * restrict densPosition)
{
	int u,v,l; //Loop variables
	double temp; //Will contain the absolute distance value from gridpoint to sample.
	double PCdot[2] __attribute__((aligned(64)));

	for(gridpoint[0] = lower[0], u = 0; u < tot_ev_per_dim[0]; gridpoint[0] += dx[0], u++)
	{
		int HalfPosition = (((gridpoint[0] - x0[0])/ dx[0]) * pdf->pdfcumsize[0]);

		//Compiler flag to inform about structure alignment
		__assume_aligned(densValues,64);
		__assume_aligned(densPosition,64);

#ifdef _OPENMP
		#pragma simd private(PCdot,temp) assert
#endif
		for(v = 0; v < tot_ev_per_dim[1]; v++)
		{
			//Conversion to PC space
			PCdot[0] = (eigenvectors->me[0][0] * gridpoint[0]) + (eigenvectors->me[0][1] * (lower[1] + (dx[1] * v)));
			PCdot[1] = (eigenvectors->me[1][0] * gridpoint[0]) + (eigenvectors->me[1][1] * (lower[1] + (dx[1] * v)));

			//Absolute distance calculation (squared distance scaled by bandwidth)
			temp = (((PC[0] - PCdot[0]) * (PC[0] - PCdot[0])) + ((PC[1] - PCdot[1]) * (PC[1] - PCdot[1])) ) / h2;

			//If OpenMP version, store the density value in an auxiliar vector densValues, previous to storing in the final PDF structure
			//Vector densPosition will contain the position of the gridpoint in the final PDF structure
#ifdef _OPENMP
			//PDFposition
			densPosition[v] = HalfPosition + ((((lower[1] + (dx[1] * v)) - x0[1])/ dx[1]) * pdf->pdfcumsize[1]);
			// Kernel weight; the (fabs(temp)<1.) factor zeroes points outside
			// the unit support (appears to be an Epanechnikov-style kernel —
			// verify against the accompanying paper)
			densValues[v] = (0.5/cd*(2+2.)*(1.-temp)) * (fabs(temp)<1.);
			//If serial version, store the density value of the sample over the gridpoint in the PDF structure
#else
			gridpoint[1] = (lower[1] + (dx[1] * v));
			dif_pos[0] = (gridpoint[0] - x0[0])/ dx[0];
			dif_pos[1] = (gridpoint[1] - x0[1])/ dx[1];
			*PDFitem(pdf ,dif_pos, 2) += (0.5/cd*(2+2.)*(1.-temp)) * (fabs(temp)<1.) ;
#endif
		}
#ifdef _OPENMP
		// Commit the row of densities; atomic because other threads may be
		// accumulating into overlapping PDF cells
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			#pragma omp atomic
			pdf->PDF[densPosition[v]] += densValues[v];
#endif
	}
}

//Compute the density in the bounding box of a sample - Function for 3D spaces
// Same contract as compute2DBox_2D but over a 2D slice (gridpoint[2] fixed by
// the caller) of a 3D grid.
// NOTE(review): the loop bounds here use <= while the 2D variant uses < , and
// "#pragma simd private(PCdot)" omits temp (the 2D variant makes it private)
// — confirm both differences are intentional.
void compute2DBox_3D(PDFPtr pdf, double * PC, double * lower, int * tot_ev_per_dim, double * gridpoint, size_t * dif_pos, double * x0,double * dx, double h2, double cd, MAT * eigenvectors, double * restrict densValues, int * restrict densPosition)
{
	int u,v,l; //Loop variables
	double temp; //Will contain the absolute distance value from gridpoint to sample.
	double PCdot[3] __attribute__((aligned(64)));

	//Compiler flag to inform about structure alignment
	__assume_aligned(densValues,64);
	__assume_aligned(densPosition,64);

	for(gridpoint[0] = lower[0], u = 0; u <= tot_ev_per_dim[0]; gridpoint[0] += dx[0], u++)
	{
		int HalfPosition = (((gridpoint[0] - x0[0])/ dx[0]) * pdf->pdfcumsize[0]) + (((gridpoint[2] - x0[2])/ dx[2]) * pdf->pdfcumsize[2]);

		#pragma simd private(PCdot) assert
		for(v = 0; v <= tot_ev_per_dim[1]; v++)
		{
			//Conversion to PC space
			PCdot[0] = (eigenvectors->me[0][0] * gridpoint[0]) + (eigenvectors->me[0][1] * (lower[1] + (dx[1] * v))) + (eigenvectors->me[0][2] * gridpoint[2]);
			PCdot[1] = (eigenvectors->me[1][0] * gridpoint[0]) + (eigenvectors->me[1][1] * (lower[1] + (dx[1] * v))) + (eigenvectors->me[1][2] * gridpoint[2]);
			PCdot[2] = (eigenvectors->me[2][0] * gridpoint[0]) + (eigenvectors->me[2][1] * (lower[1] + (dx[1] * v))) + (eigenvectors->me[2][2] * gridpoint[2]);

			//Absolute distance calculation
			temp = (((PC[0] - PCdot[0]) * (PC[0] - PCdot[0])) + ((PC[1] - PCdot[1]) * (PC[1] - PCdot[1])) + ((PC[2] - PCdot[2]) * (PC[2] - PCdot[2]))) / h2;

			//If OpenMP version, store the density value in an auxiliar vector densValues, previous to storing in the final PDF structure
			//Vector densPosition will contain the position of the gridpoint in the final PDF structure
#ifdef _OPENMP
			//PDFposition
			densPosition[v] = HalfPosition + ((((lower[1] + (dx[1] * v)) - x0[1])/ dx[1]) * pdf->pdfcumsize[1]);
			densValues[v] = (0.5/cd*(3+2.)*(1.-temp)) * (fabs(temp)<1.);
			//If serial version, store the density value of the sample over the gridpoint in the PDF structure
#else
			gridpoint[1] = (lower[1] + (dx[1] * v));
			dif_pos[0] = (gridpoint[0] - x0[0])/ dx[0];
			dif_pos[1] = (gridpoint[1] - x0[1])/ dx[1];
			dif_pos[2] = (gridpoint[2] - x0[2])/ dx[2];
			*PDFitem(pdf ,dif_pos, 3) += (0.5/cd*(3+2.)*(1.-temp)) * (fabs(temp)<1.) ;
#endif
		}
#ifdef _OPENMP
		for(v = 0; v <= tot_ev_per_dim[1]; v++)
			#pragma omp atomic
			pdf->PDF[densPosition[v]] += densValues[v];
#endif
	}
}

//Compute the density in the bounding box of a sample - Generic function, used for spaces of dimensionality higher than 3
// The OpenMP path is written as a sequence of flat loops over v (structure-of
// -arrays scratch buffers PCdot_vec/temp_vec/gridpoint_vec) so the compiler
// can vectorize each one independently.
void compute2DBox_ND(PDFPtr pdf, double * PC, double * lower, int * tot_ev_per_dim, double * gridpoint, size_t * dif_pos, double * x0, double * dx, int dim, double h2, double cd, MAT * eigenvectors, double * restrict densValues, int * restrict densPosition, double * restrict PCdot_vec, double * restrict temp_vec, double * restrict gridpoint_vec)
{
	int u,v,d,l; //Loop variables
	int HalfPosition;
	int dimGreaterThanTwoPosition = 0;
	double HalfTemp = 0; // NOTE(review): unused in the visible code

#ifdef _OPENMP
	//Initializations for vector implementation
	// Dimensions >= 2 are fixed for this box, so their PDF offset is hoisted
	#pragma simd reduction(+:dimGreaterThanTwoPosition) assert
	for(d = 2; d < dim; d++)
		dimGreaterThanTwoPosition += (dif_pos[d] * pdf->pdfcumsize[d]);

	for(v = 0; v < tot_ev_per_dim[1]; v++)
		for(d = 2; d < dim; d++)
			gridpoint_vec[v * dim + d] = gridpoint[d];
#endif

	for(gridpoint[0] = lower[0], u = 0; u < tot_ev_per_dim[0]; gridpoint[0] += dx[0], u++)
	{
		//Compiler flag to inform about structure alignment
		__assume_aligned(densValues,64);
		__assume_aligned(densPosition,64);

#ifdef _OPENMP
		//Vector friendly implementation
		HalfPosition = (((gridpoint[0] - x0[0])/ dx[0]) * pdf->pdfcumsize[0]) + dimGreaterThanTwoPosition;

		for(v = 0; v < tot_ev_per_dim[1]; v++)
			gridpoint_vec[v * dim + 0] = gridpoint[0];

		for(v = 0; v < tot_ev_per_dim[1]; v++)
			temp_vec[v] = 0;

		for(v = 0; v < tot_ev_per_dim[1]; v++)
			gridpoint_vec[v * dim + 1] = (lower[1] + (dx[1] * v));

		for(v = 0; v < tot_ev_per_dim[1] * dim; v++)
			PCdot_vec[v] = 0;

		// Project every gridpoint of the row into PC space (matrix product
		// with the eigenvector matrix)
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			for(d = 0; d < dim; d++)
				#pragma simd reduction(+:PCdot_vec[v * dim + d]) assert
				for(l = 0; l < dim; l++)
					PCdot_vec[v * dim + d] += (eigenvectors->me[d][l] * gridpoint_vec[v * dim + l]);

		// Squared distance from the sample to each gridpoint, in PC space
		for(v = 0; v < tot_ev_per_dim[1]; v++)
			#pragma simd reduction(+:temp_vec[v]) assert
			for(d = 0; d < dim; d++)
				temp_vec[v] += ((PC[d] - PCdot_vec[v * dim + d]) * (PC[d] - PCdot_vec[v * dim + d]));

		for(v = 0; v < tot_ev_per_dim[1]; v++)
			temp_vec[v] /= h2;

		for(v = 0; v < tot_ev_per_dim[1]; v++)
			densPosition[v] = HalfPosition + ((((lower[1] + (dx[1] * v)) - x0[1])/ dx[1]) * pdf->pdfcumsize[1]);

		for(v = 0; v < tot_ev_per_dim[1]; v++)
			densValues[v] = (0.5/cd*(dim + 2.)*(1.-temp_vec[v])) * (fabs(temp_vec[v])<1.);

		for(v = 0; v < tot_ev_per_dim[1]; v++)
			#pragma omp atomic
			pdf->PDF[densPosition[v]] += densValues[v];
#else
		// Serial implementation
		double temp;
		dif_pos[0] = (gridpoint[0] - x0[0])/ dx[0];
		for(v = 0; v < tot_ev_per_dim[1]; v++)
		{
			gridpoint[1] = (lower[1] + (dx[1] * v));

			//Conversion to PC space
			for(d = 0; d < dim; d++)
				PCdot_vec[d] = 0;
			for(d = 0; d < dim; d++)
				#pragma simd reduction(+:PCdot_vec[d]) assert
				for(l = 0; l < dim; l++)
					PCdot_vec[d] += (eigenvectors->me[d][l] * gridpoint[l]);

			//Absolute distance calculation
			temp = 0;
			#pragma simd reduction(+:temp) assert
			for(d = 0; d < dim; d++)
				temp += ((PC[d] - PCdot_vec[d]) * (PC[d] - PCdot_vec[d]));
			temp /= h2;

			dif_pos[1] = (gridpoint[1] - x0[1])/ dx[1];
			*PDFitem(pdf ,dif_pos, dim) += (0.5/cd*(dim + 2.)*(1.-temp)) * (fabs(temp)<1.) ;
		}
#endif
	}
}

/**** Functions to calculate PDF, called from main ****/

//Compute the PDF of a one-dimensional grid space
// mpdf: sample set (and its PC-space copy); pdf: output grid
// Sm1/detSm1: inverse covariance and its determinant; h: bandwidth
// x0/x1/dx: grid lower corner, upper corner and spacing
// bounds: kernel support half-width per dimension; eigenvectors: PC basis
void computePDF1D(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h , double detSm1 , double *x0, double *x1, double *dx, double *bounds, MAT *eigenvectors )
{
	int i,j,u; //Loop variables
	int dim = 1; //Dimensions of grid space
	double cd = volumeConstant(dim); //Volume constants to calculate kernel values
	double h2=h*h; //Squared bandwith value
	double *PC; // Current sample (PC space)
	double theintegral = 0.0;
	double total_vol = 0.0;
	double * sample;
	double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
	double PCdot;

	//Variables to calculate coordinates and number of gridpoints of bounding box
	int steps;
	double upper, lower, gridpoint;
	int tot_ev;
	size_t dif_pos[1];
	double abs_bound,temp;

	//Auxiliary vectors for OpenMP version
	double * densValues;
	int * densPosition;

	#pragma omp parallel default(none) \
	shared(stdout,mpdf,pdf,dim,x0,x1,dx,theintegral,total_vol,bounds,eigenvectors,cd,h2,k) \
	private(i,j,u,sample,PC,lower,upper,steps,abs_bound,tot_ev,dif_pos,gridpoint,PCdot,densValues,densPosition,temp)
	{
#ifdef _OPENMP
		// Per-thread, 64-byte-aligned scratch sized for the widest possible
		// bounding box (+3 guards against rounding at the box edges)
		int dim0_max_size = ((ceil(bounds[0] / dx[0]) * 2) + 3);
		densValues = (double *)_mm_malloc(sizeof(double) * dim0_max_size,64); //Vector to hold density values of each sample-gridpoint combination
		densPosition = (int *)_mm_malloc(sizeof(int) * dim0_max_size,64); //Vector to hold the positions of densValues values in the PDF structure
#endif
		//Initialize PDF structure to 0s
		#pragma omp for
		for(i = 0; i < pdf->total_size; i++)
			pdf->PDF[i] = 0.0f;

		//Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
		#pragma omp for
		for(i=0;i<mpdf->current;i++)
		{
			sample = MPDFPosition(mpdf,i); //Get current sample
			PC = MPDFPCPosition(mpdf,i); //Get current sample (scaled as PC)

			//For each sample, calculate its boundaries
			//Lower corner (clamped to the grid and snapped to a gridpoint)
			abs_bound = sample[0] - bounds[0];
			if (x0[0] > abs_bound)
				lower = x0[0];
			else
			{
				steps = floor((abs_bound - x0[0]) / dx[0]);
				lower = x0[0] + (steps * dx[0]);
			}

			//Upper corner
			abs_bound = sample[0] + bounds[0];
			if (x1[0] < abs_bound)
				upper = x1[0];
			else
			{
				steps = ceil((abs_bound - x0[0]) / dx[0]);
				upper = x0[0] + (steps * dx[0]);
			}

			//Calculate number of eval points per dimension
			tot_ev = rint((upper - lower)/dx[0]) + 1;

			//Calculate the PDF of the defined 1D space
#ifdef _OPENMP
			#pragma simd private(PCdot,temp) assert
#endif
			for(u = 0; u < tot_ev; u++)
			{
				PCdot = (eigenvectors->me[0][0] * (lower + (dx[0] * u)));

				//Absolute distance calculation
				temp = ((PC[0] - PCdot) * (PC[0] - PCdot)) / h2;

				//If OpenMP version, store the density value in an auxiliar vector densValues, previous to storing in the final PDF structure
				//Vector densPosition will contain the position of the gridpoint in the final PDF structure
#ifdef _OPENMP
				//PDFposition
				densPosition[u] = (((lower + (dx[0] * u)) - x0[0])/ dx[0]) * pdf->pdfcumsize[0];
				densValues[u] = (0.5/cd*(1+2.)*(1.-temp)) * (fabs(temp)<1.);
				//If serial version, store the density value of the sample over the gridpoint in the PDF structure
#else
				dif_pos[0] = ((lower + (dx[0] * u)) - x0[0])/ dx[0];
				*PDFitem(pdf ,dif_pos, 1) += (0.5/cd*(1+2.)*(1.-temp)) * (fabs(temp)<1.) ;
#endif
			}
#ifdef _OPENMP
			for(u = 0; u < tot_ev; u++)
				#pragma omp atomic
				pdf->PDF[densPosition[u]] += densValues[u];
#endif
		}
#ifdef _OPENMP
		_mm_free(densValues);
		_mm_free(densPosition);
#endif
		//Apply k constant to PDF
		#pragma omp for
		for(i=0; i < pdf->total_size; i++)
			pdf->PDF[i] = pdf->PDF[i] * k;

		//Calculate integral of PDF
		#pragma omp for reduction(+:theintegral)
		for(i=0; i < pdf->total_size; i++)
			theintegral += pdf->PDF[i];

		#pragma omp single
		theintegral = theintegral * dx[0];

		//Renormalize PDF using integral
		#pragma omp for
		for(i=0; i < pdf->total_size; i++)
			pdf->PDF[i] = pdf->PDF[i]/theintegral;

		//Calculate total volume of renormalized PDF
		#pragma omp for reduction(+:total_vol)
		for(i=0; i < pdf->total_size; i++)
			total_vol += pdf->PDF[i];
	}//End of parallel OpenMP Region

	printf("Total integrated PDF: %g. The integral: %f\n",total_vol*dx[0],theintegral);
}

//Compute the PDF of a 2D grid space
// Same contract as computePDF1D, but the per-sample work (density over the
// sample's bounding box) is delegated to compute2DBox_2D.
void computePDF2D(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h , double detSm1 , double *x0, double *x1, double *dx, double *bounds, MAT *eigenvectors )
{
	int i,j; //Loop variables
	int dim = 2; //Dimensions of grid space
	double cd = volumeConstant(dim); //Volume constants to calculate kernel values
	double h2=h*h; //Squared bandwith value
	double *PC; // Current sample (PC space)
	double theintegral = 0.0;
	double total_vol = 0.0;
	double total_dx = dx[0] * dx[1];
	double * sample;
	double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
	double * PCdot;

	//Variables to calculate coordinates and number of gridpoints of bounding box
	int steps;
	double upper, lower[2], gridpoint[2];
	int tot_ev_per_dim[2];
	size_t dif_pos[2];
	double abs_bound;

	//Auxiliary vectors for OpenMP version
	double * densValues;
	int * densPosition;

	#pragma omp parallel default(none) \
	shared(mpdf,pdf,dim,x0,x1,dx,total_dx,theintegral,total_vol,bounds,eigenvectors,cd,h2,k) \
	private(i,j,sample,PC,lower,upper,steps,abs_bound,tot_ev_per_dim,dif_pos,gridpoint,PCdot,densValues,densPosition)
	{
#ifdef _OPENMP
		int dim1_max_size = ((ceil(bounds[1] / dx[1]) * 2) + 3);
		densValues = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64); //Vector to hold density values of each sample-gridpoint combination
		densPosition = (int *)_mm_malloc(sizeof(int) * dim1_max_size,64); //Vector to hold the positions of densValues values in the PDF structure
#endif
		//Initialize PDF structure to 0s
		#pragma omp for
		for(i = 0; i < pdf->total_size; i++)
			pdf->PDF[i] = 0.0f;

		//Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
		#pragma omp for
		for(i=0;i<mpdf->current;i++)
		{
			sample = MPDFPosition(mpdf,i); //Get current sample
			PC = MPDFPCPosition(mpdf,i); //Get current sample (scaled as PC)

			//For each sample, calculate its bounding box,
			//expressed as coordinates of lower corner and number of gridpoints per dimensions
			for(j = 0; j < 2; j++)
			{
				//Lower corner
				abs_bound = sample[j] - bounds[j];
				if (x0[j] > abs_bound)
					lower[j] = x0[j];
				else
				{
					steps = floor((abs_bound - x0[j]) / dx[j]);
					lower[j] = x0[j] + (steps * dx[j]);
				}

				//Upper corner
				abs_bound = sample[j] + bounds[j];
				if (x1[j] < abs_bound)
					upper = x1[j];
				else
				{
					steps = ceil((abs_bound - x0[j]) / dx[j]);
					upper = x0[j] + (steps * dx[j]);
				}

				//Calculate number of eval points per dimension
				tot_ev_per_dim[j] = rint((upper - lower[j])/dx[j]) + 1;
			}

			//Calculate the PDF of the defined 2D box
			compute2DBox_2D(pdf,PC,lower,tot_ev_per_dim,gridpoint,dif_pos,x0,dx,h2,cd,eigenvectors,densValues,densPosition);
		}
#ifdef _OPENMP
		_mm_free(densValues);
		_mm_free(densPosition);
#endif
		//Apply k constant to PDF
		#pragma omp for
		for(i=0; i < pdf->total_size; i++)
			pdf->PDF[i] = pdf->PDF[i] * k;

		//Calculate integral of PDF
		#pragma omp for reduction(+:theintegral)
		for(i=0; i < pdf->total_size; i++)
			theintegral += pdf->PDF[i];

		#pragma omp single
		theintegral = theintegral * total_dx;
        //Renormalize PDF using integral
#pragma omp for
        for(i=0; i < pdf->total_size; i++)
            pdf->PDF[i] = pdf->PDF[i]/theintegral;
        //Calculate total volume of renormalized PDF
#pragma omp for reduction(+:total_vol)
        for(i=0; i < pdf->total_size; i++)
            total_vol += pdf->PDF[i];
    }//End of parallel OpenMP Region
    printf("Total integrated PDF: %g. The integral: %f\n",total_vol*dx[0]*dx[1],theintegral);
}

/* Debug switch, deliberately left disabled. */
#define DEBUG_TEMPS 1
#undef DEBUG_TEMPS

/*
 * computePDF3D: compute the PDF of a 3D grid space.  For every sample the
 * ellipsoidal influence region is swept along the Z axis; on each Z slice the
 * intersection is a rotated 2D ellipse, whose tight axis-aligned bounding box
 * is derived analytically (the "2D layering") and handed to compute2DBox_3D.
 * Parameters are as in computePDF2D; Sm1 is the inverse covariance matrix,
 * whose 2x2 leading block defines the slice ellipse.
 */
//Compute the PDF of grid spaces of dimension 3 or higher
void computePDF3D(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h , double detSm1 , double *x0, double *x1, double *dx, double *bounds, MAT *eigenvectors)
{
    int dim = 3;
    int i,j,l,u,w; //Loop variables
    double cd = volumeConstant(dim); //Volume constant
    double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
    double h2=h*h; //Square of bandwith value
    double *PC; // Current sample (PC space)
    double total_vol=0.0;
    double theintegral=0.0;
    double * sample; //Current sample
    double * PCdot; //NOTE(review): declared but never used in this function
    //Variables to calculate the bounding box of a sample
    double lower[3];
    double upper;
    double gridpoint[3];
    int tot_ev_per_dim[3];
    size_t dif_pos[3];
    int total_ev;
    int steps;
    double abs_bound; //Absolute bound per sample and dimension, given by ellipsoid shape
    //Calculate acumulated volume for the grid space
    double total_dx = 1.0;
    for (i = 0; i < dim; i++)
        total_dx *= dx[i];
    //Variables to perform the calculation of the 2D layering
    double A,B,C,F,Z,theta,cosTheta,sinTheta,X2,Y2,X,Y,XY,termY2,valor,termX2,upy,rightx,upx_rot,upy_rot,rightx_rot,righty_rot;
    double bound[2],box_center[2],box_min[2],box_max[2],box_steps[2],box_upper[2];
    //Calculate partial equations for the 2D layering:
    //rotate the leading 2x2 block of Sm1 by theta to diagonalize the slice ellipse
    A = Sm1->me[0][0];
    B = 2 * Sm1->me[0][1];
    C = Sm1->me[1][1];
    theta = atan(B/(A-C))/2;
    cosTheta = cos(theta);
    sinTheta = sin(theta);
    X2 = Sm1->me[0][0]*cosTheta*cosTheta + 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*sinTheta*sinTheta;
    XY = -2*Sm1->me[0][0]*cosTheta*sinTheta + 2*Sm1->me[0][1]*cosTheta*cosTheta - 2*Sm1->me[0][1]*sinTheta*sinTheta + 2*Sm1->me[1][1]*cosTheta*sinTheta;
    Y2 = Sm1->me[0][0]*sinTheta*sinTheta - 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*cosTheta*cosTheta;
    //Aux vector for OpenMP version
    double * densValues;
    int * densPosition;
    double * temp_vec;      //NOTE(review): listed private below but never allocated/used here
    double * PCdot_vec;     //NOTE(review): idem
    double * gridpoint_vec; //NOTE(review): idem
    //Beginning of OpenMP parallel region
#pragma omp parallel default(none)\
    shared(stdout,theintegral,total_vol,total_dx,k,mpdf,pdf,cd,dim,bounds,x0,x1,dx,Sm1,cosTheta,sinTheta,eigenvectors,X2,XY,Y2,h2,h) \
    private(i,j,l,u,w,sample,PC,gridpoint,total_ev,abs_bound,lower,box_upper,tot_ev_per_dim,box_steps,F,X,Y,Z,termX2,termY2,upy,rightx,upx_rot,upy_rot, \
    valor,rightx_rot,righty_rot,bound,box_center,box_min,box_max,PCdot,dif_pos,steps,upper,densValues,densPosition,temp_vec,gridpoint_vec,PCdot_vec)
    {
#ifdef _OPENMP
        /* Per-thread scatter buffers, sized by the widest slice row. */
        int dim1_max_size = ((ceil(bounds[1] / dx[1]) * 2) + 3);
        densValues = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64);
        densPosition = (int *)_mm_malloc(sizeof(int) * dim1_max_size,64);
#endif
        //Initialize PDF structure to 0s
#pragma omp for
        for(i = 0; i < pdf->total_size; i++)
            pdf->PDF[i] = 0.0f;
        //Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
#pragma omp for
        for(i=0;i<mpdf->current;i++)
        {
            sample = MPDFPosition(mpdf,i); //Get current sample
            PC = MPDFPCPosition(mpdf,i); //X is the current sample (scaled as PC)
            //Calculate boundaries for Z axis
            //Lower corner
            abs_bound = sample[2] - bounds[2];
            if (x0[2] > abs_bound)
                lower[2] = x0[2];
            else
            {
                steps = floor((abs_bound - x0[2]) / dx[2]);
                lower[2] = x0[2] + (steps * dx[2]);
            }
            //Upper corner
            abs_bound = sample[2] + bounds[2];
            if (x1[2] < abs_bound)
                upper = x1[2];
            else
            {
                steps = ceil((abs_bound - x0[2]) / dx[2]);
                upper = x0[2] + (steps * dx[2]);
            }
            //Calculate number of grid points per dimension
            total_ev = rint((upper - lower[2])/dx[2]) + 1;
            //For each gridpoint in dimensions 3 to N
            for(j = 0; j < total_ev; j++)
            {
                //Calculate location of grid point
                gridpoint[2] = lower[2] + (dx[2] * j);
                dif_pos[2] = (gridpoint[2] - x0[2])/ dx[2];
                /* This code calculates, a 2D plane formed by the first two dimensions of the space, the optimal
                 * box inside the initial bounding box */
                Z = gridpoint[2] - sample[2];
                //X,Y, along with X2,XY,Y2 form the equation of the 2D rotated plane
                F = Sm1->me[2][2] * Z * Z - 1;
                X = 2*Sm1->me[0][2]*Z*cosTheta + 2*Sm1->me[1][2]*Z*sinTheta;
                Y = -2*Sm1->me[0][2]*Z*sinTheta + 2*Sm1->me[1][2]*Z*cosTheta;
                //Calculate displacements and obtain formula (x-xo)^2 / a^2 + % (y-yo)^2/b^2 = 1
                termX2 = (X/X2)/2;
                termY2 = (Y/Y2)/2;
                valor = -F + termX2*termX2*X2 + termY2*termY2*Y2;
                //Calculate new rotated bounding box. UP and RIGHT are the corners of the new bounding box
                //(the 0* terms keep the full rotation formula visible)
                upy = sqrt(1/(Y2/valor)) * h;
                rightx = sqrt(1/(X2/valor)) * h;
                upx_rot = 0 * cosTheta + upy * sinTheta;
                upy_rot = -0 * sinTheta + upy * cosTheta;
                rightx_rot = rightx * cosTheta + 0 * sinTheta;
                righty_rot = -rightx * sinTheta + 0 * cosTheta;
                //Calculate original displacement (rotated ellipse)
                box_center[0] = termX2*cosTheta-termY2*sinTheta;
                box_center[1] = termX2*sinTheta+termY2*cosTheta;
                bound[0] = sqrt(upx_rot*upx_rot+rightx_rot*rightx_rot);
                bound[1] = sqrt(upy_rot*upy_rot+righty_rot*righty_rot);
                //Calculate lower and upper bound of new BoundingBox
                for(u = 0; u < 2; u++)
                {
                    box_min[u] = (sample[u] - box_center[u]) - bound[u];
                    box_steps[u] = floor((box_min[u] - x0[u]) / dx[u]);
                    lower[u] = (x0[u] > box_min[u])?(x0[u]):(x0[u] + (box_steps[u] * dx[u]));
                    box_max[u] = (sample[u] - box_center[u]) + bound[u];
                    box_steps[u] = ceil((box_max[u] - x0[u]) / dx[u]);
                    box_upper[u] = (x1[u] < box_max[u])?(x1[u]):(x0[u] + (box_steps[u] * dx[u]));
                    tot_ev_per_dim[u] = rint((box_upper[u] - lower[u])/dx[u]);
                }
                //Calculate the PDF of the defined 2D box
                compute2DBox_3D(pdf,PC,lower,tot_ev_per_dim,gridpoint,dif_pos,x0,dx,h2,cd,eigenvectors,densValues,densPosition);
            }//End of "per gridpoint" for
        } //End of "per sample" for
#ifdef _OPENMP
        _mm_free(densValues);
        _mm_free(densPosition);
#endif
        //Apply k constant to PDF
#pragma omp for
        for(i=0; i < pdf->total_size; i++)
            pdf->PDF[i] = pdf->PDF[i] * k;
        //Calculate integral of PDF
#pragma omp for reduction(+:theintegral)
        for(i=0; i < pdf->total_size; i++)
            theintegral += pdf->PDF[i];
#pragma omp single
        theintegral = theintegral * total_dx;
        //Renormalize PDF using integral
#pragma omp for
        for(i=0; i < pdf->total_size; i++)
            pdf->PDF[i] = pdf->PDF[i]/theintegral;
        //Calculate total volume of renormalized PDF
#pragma omp for reduction(+:total_vol)
        for(i=0; i < pdf->total_size; i++)
            total_vol += pdf->PDF[i];
    }//End of parallel OpenMP Region
    printf("Total integrated PDF: %g. The integral: %f\n",total_vol*total_dx,theintegral);
}

/*
 * computePDFND: generalization of computePDF3D to dim >= 3.  Dimensions 2..N
 * are enumerated as a flattened index, and each (dim-2)-dimensional gridpoint
 * yields a 2D slice handled exactly as in the 3D case.  The per-sample work
 * arrays are heap-allocated because dim is a runtime parameter.
 */
//Compute the PDF of grid spaces of dimension 3 or higher
void computePDFND(MPDFEstimatorPtr mpdf, PDFPtr pdf, MAT *Sm1 , double h , double detSm1 , double *x0, double *x1, double *dx, double *bounds, MAT *eigenvectors, int dim)
{
    int i,j,l,u,w; //Loop variables
    double cd = volumeConstant(dim); //Volume constant
    double k=1./sqrt(detSm1)/mpdf->current/pow(h,mpdf->length); //Constant to recover the volume in the X space from the volume in the PC space
    double h2=h*h; //Square of bandwith value
    double *PC; // Current sample (PC space)
    double total_vol=0.0;
    double theintegral=0.0;
    double * sample; //Current sample
    double * PCdot;
    //Variables to calculate the bounding box of a sample (heap-allocated, runtime dim)
    double * lower;
    double upper;
    double * gridpoint;
    int * tot_ev_per_dim;
    size_t * dif_pos;
    int total_ev;
    int steps;
    double abs_bound; //Absolute bound per sample and dimension, given by ellipsoid shape
    //Calculate acumulated volume for the grid space
    double total_dx = 1.0;
    for (i = 0; i < dim; i++)
        total_dx *= dx[i];
    //Variables to perform the calculation of the 2D layering
    double A,B,C,F,Z,theta,cosTheta,sinTheta,X2,Y2,X,Y,XY,termY2,valor,termX2,upy,rightx,upx_rot,upy_rot,rightx_rot,righty_rot;
    double bound[2],box_center[2],box_min[2],box_max[2],box_steps[2],box_upper[2];
    //Calculate partial equations for the 2D layering
    A = Sm1->me[0][0];
    B = 2 * Sm1->me[0][1];
    C = Sm1->me[1][1];
    theta = atan(B/(A-C))/2;
    cosTheta = cos(theta);
    sinTheta = sin(theta);
    X2 = Sm1->me[0][0]*cosTheta*cosTheta + 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*sinTheta*sinTheta;
    XY = -2*Sm1->me[0][0]*cosTheta*sinTheta + 2*Sm1->me[0][1]*cosTheta*cosTheta - 2*Sm1->me[0][1]*sinTheta*sinTheta + 2*Sm1->me[1][1]*cosTheta*sinTheta;
    Y2 = Sm1->me[0][0]*sinTheta*sinTheta - 2*Sm1->me[0][1]*cosTheta*sinTheta + Sm1->me[1][1]*cosTheta*cosTheta;
    //Aux vector for OpenMP version
    double * densValues;
    int * densPosition;
    double * temp_vec;
    double * PCdot_vec;
    double *
gridpoint_vec;
    //Beginning of OpenMP parallel region
#pragma omp parallel default(none)\
    shared(stdout,theintegral,total_vol,total_dx,k,mpdf,pdf,cd,dim,bounds,x0,x1,dx,Sm1,cosTheta,sinTheta,eigenvectors,X2,XY,Y2,h2,h) \
    private(i,j,l,u,w,sample,PC,gridpoint,total_ev,abs_bound,lower,box_upper,tot_ev_per_dim,box_steps,F,X,Y,Z,termX2,termY2,upy,rightx,upx_rot,upy_rot, \
    valor,rightx_rot,righty_rot,bound,box_center,box_min,box_max,PCdot,dif_pos,steps,upper,densValues,densPosition,temp_vec,gridpoint_vec,PCdot_vec)
    {
        //Allocate variables to calculate the bounding box of a sample
        /* NOTE(review): malloc/_mm_malloc results are not checked for NULL. */
        lower = (double *)malloc(sizeof(double) * dim);
        gridpoint = (double *)malloc(sizeof(double) * dim);
        tot_ev_per_dim = (int *)malloc(sizeof(int) * dim);
        dif_pos = (size_t *)malloc(sizeof(size_t) * dim);
#ifdef _OPENMP
        /* Per-thread scatter and scratch buffers, sized by the widest slice row. */
        int dim1_max_size = ((ceil(bounds[1] / dx[1]) * 2) + 3);
        densValues = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64);
        densPosition = (int *)_mm_malloc(sizeof(int) * dim1_max_size,64);
        temp_vec = (double *)_mm_malloc(sizeof(double) * dim1_max_size,64);
        gridpoint_vec = (double *)_mm_malloc(sizeof(double) * dim1_max_size * dim,64);
        PCdot_vec = (double *)_mm_malloc(sizeof(double) * dim1_max_size * dim,64);
#else
        PCdot_vec = (double *)malloc(sizeof(double) * dim);
#endif
        //Initialize PDF structure to 0s
#pragma omp for
        for(i = 0; i < pdf->total_size; i++)
            pdf->PDF[i] = 0.0f;
        //Main calculation loop. For each sample calculate the PDF of its influence area and store in the PDF structure
#pragma omp for
        for(i=0;i<mpdf->current;i++)
        {
            sample = MPDFPosition(mpdf,i); //Get current sample
            PC = MPDFPCPosition(mpdf,i); //X is the current sample (scaled as PC)
            //For each sample, calculate its bounding box,
            //expressed as coordinates of lower corner and number of gridpoints per dimensions
            total_ev = 1;
            for(j = 2; j < dim; j++)
            {
                //Lower corner
                abs_bound = sample[j] - bounds[j];
                if (x0[j] > abs_bound)
                    lower[j] = x0[j];
                else
                {
                    steps = floor((abs_bound - x0[j]) / dx[j]);
                    lower[j] = x0[j] + (steps * dx[j]);
                }
                //Upper corner
                abs_bound = sample[j] + bounds[j];
                if (x1[j] < abs_bound)
                    upper = x1[j];
                else
                {
                    steps = ceil((abs_bound - x0[j]) / dx[j]);
                    upper = x0[j] + (steps * dx[j]);
                }
                //Calculate number of grid points per dimension
                tot_ev_per_dim[j] = rint((upper - lower[j])/dx[j]) + 1;
                total_ev *= tot_ev_per_dim[j] ;
            }
            //For each gridpoint in dimensions 3 to N
            for(j = 0; j < total_ev; j++)
            {
                //Calculate location of grid point:
                //decode the flattened index j into per-dimension offsets (row-major)
                int divisor;
                int eval_point = j;
                for(u = 2; u < dim-1; u++)
                {
                    divisor = 1;
                    for(w = u+1; w < dim; w++)
                        divisor *= tot_ev_per_dim[w];
                    gridpoint[u] = lower[u] + (dx[u] * (eval_point / divisor));
                    eval_point = eval_point % divisor;
                }
                gridpoint[dim-1] = lower[dim-1] + (dx[dim-1] * eval_point); //Last case
                //Fill structure with gridpoint position
                for(l = 2; l < dim; l++)
                    dif_pos[l] = (gridpoint[l] - x0[l])/ dx[l];
                /* This code calculates, a 2D plane formed by the first two dimensions of the space, the optimal
                 * box inside the initial bounding box */
                Z = gridpoint[2] - sample[2];
                //X,Y, along with X2,XY,Y2 form the equation of the 2D rotated plane
                F = Sm1->me[2][2] * Z * Z - 1;
                X = 2*Sm1->me[0][2]*Z*cosTheta + 2*Sm1->me[1][2]*Z*sinTheta;
                Y = -2*Sm1->me[0][2]*Z*sinTheta + 2*Sm1->me[1][2]*Z*cosTheta;
                //Calculate displacements and obtain formula (x-xo)^2 / a^2 + % (y-yo)^2/b^2 = 1
                termX2 = (X/X2)/2;
                termY2 = (Y/Y2)/2;
                valor = -F + termX2*termX2*X2 + termY2*termY2*Y2;
                //Calculate new rotated bounding box. UP and RIGHT are the corners of the new bounding box
                upy = sqrt(1/(Y2/valor)) * h;
                rightx = sqrt(1/(X2/valor)) * h;
                upx_rot = 0 * cosTheta + upy * sinTheta;
                upy_rot = -0 * sinTheta + upy * cosTheta;
                rightx_rot = rightx * cosTheta + 0 * sinTheta;
                righty_rot = -rightx * sinTheta + 0 * cosTheta;
                //Calculate original displacement (rotated ellipse)
                box_center[0] = termX2*cosTheta-termY2*sinTheta;
                box_center[1] = termX2*sinTheta+termY2*cosTheta;
                bound[0] = sqrt(upx_rot*upx_rot+rightx_rot*rightx_rot);
                bound[1] = sqrt(upy_rot*upy_rot+righty_rot*righty_rot);
                //Calculate lower and upper bound of new BoundingBox
                for(u = 0; u < 2; u++)
                {
                    box_min[u] = (sample[u] - box_center[u]) - bound[u];
                    box_steps[u] = floor((box_min[u] - x0[u]) / dx[u]);
                    lower[u] = (x0[u] > box_min[u])?(x0[u]):(x0[u] + (box_steps[u] * dx[u]));
                    box_max[u] = (sample[u] - box_center[u]) + bound[u];
                    box_steps[u] = ceil((box_max[u] - x0[u]) / dx[u]);
                    box_upper[u] = (x1[u] < box_max[u])?(x1[u]):(x0[u] + (box_steps[u] * dx[u]));
                    tot_ev_per_dim[u] = rint((box_upper[u] - lower[u])/dx[u]);
                }
                //Calculate the PDF of the defined 2D box
                compute2DBox_ND(pdf,PC,lower,tot_ev_per_dim,gridpoint,dif_pos,x0,dx,dim,h2,cd,eigenvectors,densValues,densPosition,PCdot_vec,temp_vec,gridpoint_vec);
            }//End of "per gridpoint" for
        } //End of "per sample" for
        //Delete memory structures created by threads
        free(lower);
        free(tot_ev_per_dim);
        free(dif_pos);
        free(gridpoint);
#ifdef _OPENMP
        _mm_free(densValues);
        _mm_free(densPosition);
        _mm_free(PCdot_vec);
        _mm_free(temp_vec);
        _mm_free(gridpoint_vec);
#else
        free(PCdot_vec);
#endif
        //Apply k constant to PDF
#pragma omp for
        for(i=0; i < pdf->total_size; i++)
            pdf->PDF[i] = pdf->PDF[i] * k;
        //Calculate integral of PDF
#pragma omp for reduction(+:theintegral)
        for(i=0; i < pdf->total_size; i++)
            theintegral += pdf->PDF[i];
#pragma omp single
        theintegral = theintegral * total_dx;
        //Renormalize PDF using integral
#pragma omp for
        for(i=0; i < pdf->total_size; i++)
            pdf->PDF[i] = pdf->PDF[i]/theintegral;
        //Calculate total volume of renormalized PDF
#pragma omp for reduction(+:total_vol)
        for(i=0; i < pdf->total_size; i++)
            total_vol += pdf->PDF[i];
    }//End of parallel OpenMP Region
    printf("Total integrated PDF: %g. The integral: %f\n",total_vol*total_dx,theintegral);
}
/* ======== concatenated file boundary: GB_binop__atan2_fp32.c ======== */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__atan2_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_01__atan2_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__atan2_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__atan2_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__atan2_fp32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__atan2_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__atan2_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__atan2_fp32)
// C=scalar+B                       GB (_bind1st__atan2_fp32)
// C=scalar+B'                      GB (_bind1st_tran__atan2_fp32)
// C=A+scalar                       GB (_bind2nd__atan2_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__atan2_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = atan2f (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = atan2f (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN2 || GxB_NO_FP32 || GxB_NO_ATAN2_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__atan2_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (unreachable second return; kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__atan2_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__atan2_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__atan2_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__atan2_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = atan2f (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__atan2_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = atan2f (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = atan2f (x, aij) ;         \
}

GrB_Info GB (_bind1st_tran__atan2_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = atan2f (aij, y) ;         \
}

GrB_Info GB (_bind2nd_tran__atan2_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ======== concatenated file boundary: GB_subref_template.c ======== */
//------------------------------------------------------------------------------ // GB_subref_template: C = A(I,J), or C = pattern (A(I,J)) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ #if defined ( GB_SYMBOLIC ) // symbolic method must tolerate zombies #define GB_Ai(p) GB_UNFLIP (Ai [p]) #else // numeric method will not see any zombies #define GB_Ai(p) Ai [p] #endif // to iterate across all entries in a bucket: #define GB_for_each_index_in_bucket(inew,i) \ for (int64_t inew = Mark[i]-1 ; inew >= 0 ; inew = Inext [inew]) // copy values from A(:,kA) to C(:,kC): Cx [pC:pC+len-1] = ... (pA:pA+len-1). #if defined ( GB_SYMBOLIC ) // symbolic copy: Cx is int64_t; Ax is ignored #define GB_COPY_RANGE(pC,pA,len) \ for (int64_t k = 0 ; k < (len) ; k++) \ { \ Cx [(pC) + k] = (pA) + k ; \ } #else // numeric copy: Cx and Ax are both (GB_void *), and point to the same type #define GB_COPY_RANGE(pC,pA,len) \ memcpy (Cx + (pC)*asize, Ax + (pA)*asize, (len) * asize) ; #endif // copy a single value from A(:,kA) to C(:,kC): Cx [pC] = ... 
(pA]) #if defined ( GB_SYMBOLIC ) // symbolic copy: Cx is int64_t; Ax is ignored #define GB_COPY_ENTRY(pC,pA) \ Cx [pC] = (pA) ; #else // numeric copy: Cx and Ax are both (GB_void *), and point to the same type #define GB_COPY_ENTRY(pC,pA) \ /* Cx [pC] = Ax [pA] */ \ memcpy (Cx + (pC)*asize, Ax + (pA)*asize, asize) ; #endif // the type of Cx #if defined ( GB_SYMBOLIC ) // C is an int64_t array; the type of A is ignored #define GB_CTYPE int64_t #define GB_CSIZE1 1 #define GB_CSIZE2 (sizeof (int64_t)) #else // C and A have the same type #define GB_CTYPE GB_void #define GB_CSIZE1 asize #define GB_CSIZE2 asize #endif { //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ai = A->i ; const int64_t avlen = A->vlen ; #if defined ( GB_SYMBOLIC ) const int64_t nzombies = A->nzombies ; #endif #if defined ( GB_PHASE_2_OF_2 ) && defined ( GB_NUMERIC ) ASSERT (C->type = A->type) ; const GB_void *GB_RESTRICT Ax = (GB_void *) A->x ; const int64_t asize = A->type->size ; #endif //-------------------------------------------------------------------------- // get C //-------------------------------------------------------------------------- #if defined ( GB_PHASE_2_OF_2 ) int64_t *GB_RESTRICT Ci = C->i ; GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ; #endif //-------------------------------------------------------------------------- // get I //-------------------------------------------------------------------------- // these values are ignored if Ikind == GB_LIST int64_t ibegin = Icolon [GxB_BEGIN] ; int64_t iinc = Icolon [GxB_INC ] ; int64_t inc = (iinc < 0) ? 
(-iinc) : iinc ; #ifdef GB_DEBUG int64_t iend = Icolon [GxB_END ] ; #endif //-------------------------------------------------------------------------- // phase1: count entries in each C(:,kC); phase2: compute C //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kfirst = TaskList [taskid].kfirst ; int64_t klast = TaskList [taskid].klast ; bool fine_task = (klast < 0) ; if (fine_task) { // a fine task operates on a slice of a single vector klast = kfirst ; } // a coarse task accesses all of I for all its vectors int64_t pI = 0 ; int64_t pI_end = nI ; int64_t ilen = nI ; ASSERT (0 <= kfirst && kfirst <= klast && klast < Cnvec) ; //---------------------------------------------------------------------- // compute all vectors C(:,kfirst:klast) for this task //---------------------------------------------------------------------- for (int64_t kC = kfirst ; kC <= klast ; kC++) { //------------------------------------------------------------------ // get C(:,kC) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) // phase1 simply counts the # of entries in C(*,kC). int64_t clen = 0 ; #else // This task computes all or part of C(:,kC), which are the entries // in Ci,Cx [pC:pC_end-1]. int64_t pC, pC_end ; if (fine_task) { // A fine task computes a slice of C(:,kC) pC = TaskList [taskid ].pC ; pC_end = TaskList [taskid+1].pC ; ASSERT (Cp [kC] <= pC && pC <= pC_end && pC_end <= Cp [kC+1]) ; } else { // The vectors of C are never sliced for a coarse task, so this // task computes all of C(:,kC). 
pC = Cp [kC] ; pC_end = Cp [kC+1] ; } int64_t clen = pC_end - pC ; if (clen == 0) continue ; #endif //------------------------------------------------------------------ // get A(:,kA) //------------------------------------------------------------------ int64_t pA, pA_end ; if (fine_task) { // a fine task computes a slice of a single vector C(:,kC). // The task accesses Ai,Ax [pA:pA_end-1], which holds either // the entire vector A(imin:imax,kA) for method 6, the entire // dense A(:,kA) for methods 1 and 2, or a slice of the // A(imin:max,kA) vector for all other methods. pA = TaskList [taskid].pA ; pA_end = TaskList [taskid].pA_end ; } else { // a coarse task computes the entire vector C(:,kC). The task // accesses all of A(imin:imax,kA), for most methods, or all of // A(:,kA) for methods 1 and 2. The vector A(*,kA) appears in // Ai,Ax [pA:pA_end-1]. pA = Ap_start [kC] ; pA_end = Ap_end [kC] ; } int64_t alen = pA_end - pA ; if (alen == 0) continue ; //------------------------------------------------------------------ // get I //------------------------------------------------------------------ if (fine_task) { // A fine task accesses I [pI:pI_end-1]. For methods 2 and 6, // pI:pI_end is a subset of the entire 0:nI-1 list. For all // other methods, pI = 0 and pI_end = nI, and the task can // access all of I. pI = TaskList [taskid].pB ; pI_end = TaskList [taskid].pB_end ; ilen = pI_end - pI ; } //------------------------------------------------------------------ // determine the method to use //------------------------------------------------------------------ int method ; if (fine_task) { // The method that the fine task uses for its slice of A(*,kA) // and C(*,kC) has already been determined by GB_subref_slice. 
method = (int) (-TaskList [taskid].klast) ; } else { // determine the method based on A(*,kA) and I method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI, (Mark != NULL), need_qsort, iinc, nduplicates) ; } //------------------------------------------------------------------ // extract C (:,kC) = A (I,kA): consider all cases //------------------------------------------------------------------ switch (method) { //-------------------------------------------------------------- case 1 : // C(:,kC) = A(:,kA) where A(:,kA) is dense //-------------------------------------------------------------- // A (:,kA) has not been sliced ASSERT (Ikind == GB_ALL) ; ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // copy the entire vector and construct indices #if defined ( GB_PHASE_1_OF_2 ) clen = ilen ; #else for (int64_t k = 0 ; k < ilen ; k++) { int64_t inew = k + pI ; ASSERT (inew == GB_ijlist (I, inew, Ikind, Icolon)) ; ASSERT (inew == GB_Ai (pA + inew)) ; Ci [pC + k] = inew ; } GB_COPY_RANGE (pC, pA + pI, ilen) ; #endif break ; //-------------------------------------------------------------- case 2 : // C(:,kC) = A(I,kA) where A(I,kA) is dense //-------------------------------------------------------------- // This method handles any kind of list I, but A(:,kA) // must be dense. A(:,kA) has not been sliced. ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // scan I and get the entry in A(:,kA) via direct lookup #if defined ( GB_PHASE_1_OF_2 ) clen = ilen ; #else for (int64_t k = 0 ; k < ilen ; k++) { // C(inew,kC) = A(i,kA), and it always exists. 
int64_t inew = k + pI ; int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ; ASSERT (i == GB_Ai (pA + i)) ; Ci [pC + k] = inew ; GB_COPY_ENTRY (pC + k, pA + i) ; } #endif break ; //-------------------------------------------------------------- case 3 : // the list I has a single index, ibegin //-------------------------------------------------------------- // binary search in GB_subref_phase0 has already found it. // This can be any Ikind with nI=1: GB_ALL with A->vlen=1, // GB_RANGE with ibegin==iend, GB_STRIDE such as 0:-1:0 // (with length 1), or a GB_LIST with ni=1. // Time: 50x faster than MATLAB ASSERT (!fine_task) ; ASSERT (alen == 1) ; ASSERT (nI == 1) ; ASSERT (GB_Ai (pA) == GB_ijlist (I, 0, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen = 1 ; #else Ci [pC] = 0 ; GB_COPY_ENTRY (pC, pA) ; #endif break ; //-------------------------------------------------------------- case 4 : // Ikind is ":", thus C(:,kC) = A (:,kA) //-------------------------------------------------------------- // Time: 1x MATLAB but low speedup on the Mac. Why? // Probably memory bound since it is just memcpy's. ASSERT (Ikind == GB_ALL && ibegin == 0) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else #if defined ( GB_SYMBOLIC ) if (nzombies == 0) { memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ; } else { // with zombies for (int64_t k = 0 ; k < alen ; k++) { int64_t i = GB_Ai (pA + k) ; ASSERT (i == GB_ijlist (I, i, Ikind, Icolon)) ; Ci [pC + k] = i ; } } #else memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ; #endif GB_COPY_RANGE (pC, pA, alen) ; #endif break ; //-------------------------------------------------------------- case 5 : // Ikind is GB_RANGE = ibegin:iend //-------------------------------------------------------------- // Time: much faster than MATLAB. Good speedup too. 
ASSERT (Ikind == GB_RANGE) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else for (int64_t k = 0 ; k < alen ; k++) { int64_t i = GB_Ai (pA + k) ; int64_t inew = i - ibegin ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; Ci [pC + k] = inew ; } GB_COPY_RANGE (pC, pA, alen) ; #endif break ; //-------------------------------------------------------------- case 6 : // I is short vs nnz (A (:,kA)), use binary search //-------------------------------------------------------------- // Time: very slow unless I is very short and A(:,kA) is // very long. // This case can handle any kind of I, and A(:,kA) of any // properties. For a fine task, A(:,kA) has not been // sliced; I has been sliced instead. // If the I bucket inverse has not been created, this // method is the only option. Alternatively, if nI = // length (I) is << nnz (A (:,kA)), then scanning I and // doing a binary search of A (:,kA) is faster than doing a // linear-time search of A(:,kA) and a lookup into the I // bucket inverse. // The vector of C is constructed in sorted order, so no // sort is needed. // A(:,kA) has not been sliced. ASSERT (pA == Ap_start [kC]) ; ASSERT (pA_end == Ap_end [kC]) ; // scan I, in order, and search for the entry in A(:,kA) for (int64_t k = 0 ; k < ilen ; k++) { // C(inew,kC) = A (i,kA), if it exists. 
// i = I [inew] ; or from a colon expression int64_t inew = k + pI ; int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ; bool found ; int64_t pleft = pA ; int64_t pright = pA_end - 1 ; #if defined ( GB_SYMBOLIC ) bool is_zombie ; GB_BINARY_SEARCH_ZOMBIE (i, Ai, pleft, pright, found, nzombies, is_zombie) ; #else GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ; #endif if (found) { ASSERT (i == GB_Ai (pleft)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pleft) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- case 7 : // I is ibegin:iinc:iend with iinc > 1 //-------------------------------------------------------------- // Time: 1 thread: C=A(1:2:n,:) is 3x slower than MATLAB // but has good speedup. About as fast as MATLAB with // enough threads. ASSERT (Ikind == GB_STRIDE && iinc > 1) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present; see if it is in ibegin:iinc:iend int64_t i = GB_Ai (pA + k) ; ASSERT (ibegin <= i && i <= iend) ; i = i - ibegin ; if (i % iinc == 0) { // i is in the sequence ibegin:iinc:iend #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else int64_t inew = i / iinc ; ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //---------------------------------------------------------- case 8 : // I = ibegin:(-iinc):iend, with iinc < -1 //---------------------------------------------------------- // Time: 2x slower than MATLAB for iinc = -2 or -8. // Good speedup though. Faster than MATLAB for // large values (iinc = -128). 
ASSERT (Ikind == GB_STRIDE && iinc < -1) ; for (int64_t k = alen - 1 ; k >= 0 ; k--) { // A(i,kA) present; see if it is in ibegin:iinc:iend int64_t i = GB_Ai (pA + k) ; ASSERT (iend <= i && i <= ibegin) ; i = ibegin - i ; if (i % inc == 0) { // i is in the sequence ibegin:iinc:iend #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else int64_t inew = i / inc ; ASSERT (pC < pC_end) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //---------------------------------------------------------- case 9 : // I = ibegin:(-1):iend //---------------------------------------------------------- // Time: much faster than MATLAB. Good speedup. ASSERT (Ikind == GB_STRIDE && iinc == -1) ; #if defined ( GB_PHASE_1_OF_2 ) clen = alen ; #else for (int64_t k = alen - 1 ; k >= 0 ; k--) { // A(i,kA) is present int64_t i = GB_Ai (pA + k) ; int64_t inew = (ibegin - i) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; } #endif break ; //-------------------------------------------------------------- case 10 : // I unsorted, and C needs qsort, duplicates OK //-------------------------------------------------------------- // Time: with one thread: 2x slower than MATLAB, probably // because of the qsort. Good speedup however. This used // if qsort is needed but ndupl == 0. Try a method that // needs qsort, but no duplicates? // Case 10 works well when I has many entries and A(:,kA) // has few entries. C(:,kC) must be sorted after this pass. 
ASSERT (Ikind == GB_LIST) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // traverse bucket i for all indices inew where // i == I [inew] or where i is from a colon expression GB_for_each_index_in_bucket (inew, i) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; if (!fine_task) { // a coarse task owns this entire C(:,kC) vector, so // the sort can be done now. The sort for vectors // handled by multiple fine tasks must wait until all // task are completed, below in the post sort. pC = Cp [kC] ; GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1), GB_CSIZE2, clen) ; } #endif break ; //-------------------------------------------------------------- case 11 : // I not contiguous, with duplicates. No qsort needed //-------------------------------------------------------------- // Case 11 works well when I has many entries and A(:,kA) // has few entries. It requires that I be sorted on input, // so that no sort is required for C(:,kC). It is // otherwise identical to Case 9. ASSERT (Ikind == GB_LIST) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // traverse bucket i for all indices inew where // i == I [inew] or where i is from a colon expression GB_for_each_index_in_bucket (inew, i) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- case 12 : // I not contiguous, no duplicates. No qsort needed. 
//-------------------------------------------------------------- // Identical to Case 11, except GB_for_each_index_in_bucket // just needs to iterate 0 or 1 times. Works well when I // has many entries and A(:,kA) has few entries. ASSERT (Ikind == GB_LIST && nduplicates == 0) ; for (int64_t k = 0 ; k < alen ; k++) { // A(i,kA) present, look it up in the I inverse buckets int64_t i = GB_Ai (pA + k) ; // bucket i has at most one index inew such that // i == I [inew] int64_t inew = Mark [i] - 1 ; if (inew >= 0) { ASSERT (inew >= 0 && inew < nI) ; ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ; #if defined ( GB_PHASE_1_OF_2 ) clen++ ; #else Ci [pC] = inew ; GB_COPY_ENTRY (pC, pA + k) ; pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif break ; //-------------------------------------------------------------- default:; //-------------------------------------------------------------- } //------------------------------------------------------------------ // final count of nnz (C (:,j)) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (fine_task) { TaskList [taskid].pC = clen ; } else { Cp [kC] = clen ; } #endif } } //-------------------------------------------------------------------------- // phase2: post sort for any vectors handled by fine tasks with method 10 //-------------------------------------------------------------------------- #if defined ( GB_PHASE_2_OF_2 ) if (post_sort) { int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { int64_t kC = TaskList [taskid].kfirst ; bool do_post_sort = (TaskList [taskid].len != 0) ; if (do_post_sort) { // This is the first fine task with method 10 for C(:,kC). The // vector C(:,kC) must be sorted, since method 10 left it with // unsorted indices. 
int64_t pC = Cp [kC] ; int64_t clen = Cp [kC+1] - pC ; GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1), GB_CSIZE2, clen) ; } } } #endif } #undef GB_Ai #undef GB_for_each_index_in_bucket #undef GB_COPY_RANGE #undef GB_COPY_ENTRY #undef GB_CTYPE #undef GB_CSIZE1 #undef GB_CSIZE2
// GB_binop__eq_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_01__eq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_03__eq_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_fp64)
// A*D function (colscale):         GB (_AxD__eq_fp64)
// D*A function (rowscale):         GB (_DxB__eq_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_fp64)
// C=scalar+B                       GB (_bind1st__eq_fp64)
// C=scalar+B'                      GB (_bind1st_tran__eq_fp64)
// C=A+scalar                       GB (_bind2nd__eq_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__eq_fp64)

// C type:   bool
// A type:   double
// B,b type: double
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_FP64 || GxB_NO_EQ_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// EQ is not in that list, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accumulation is not defined for the EQ operator; compiled out
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accumulation is not defined for the EQ operator; compiled out
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__eq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (may be NULL)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (may be NULL)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = (x == aij) ;          \
}

GrB_Info GB (_bind1st_tran__eq_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = (aij == y) ;          \
}

GrB_Info GB (_bind2nd_tran__eq_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// GB_unaryop__abs_int64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int64_int32
// op(A') function:  GB_tran__abs_int64_int32

// C type:   int64_t
// A type:   int32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IABS (aij)

// input type of A
#define GB_ATYPE \
    int32_t

// output type of C
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij)   \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                  \
    /* aij = Ax [pA] */            \
    GB_GETA (aij, Ax, pA) ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;          \
    GB_OP (GB_CX (pC), z) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = |(int64_t) Ax [p]| for all anz entries, via the GB_CAST_OP macro.
GrB_Info GB_unop__abs_int64_int32
(
    int64_t *Cx,       // Cx and Ax may be aliased
    int32_t *Ax,       // input values (cast to int64_t before GB_IABS)
    int64_t anz,       // number of entries to process
    int nthreads       // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// C = |A'| with the int32->int64 cast applied; the loop body lives in
// GB_unaryop_transpose.c, which expands GB_CAST_OP (phase 2 of 2).
GrB_Info GB_tran__abs_int64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,      // per-slice row counts from phase 1
    GBI_single_iterator Iter,             // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice,   // how A is sliced across threads
    int naslice                           // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
masked-spgemm-inner.h
#ifndef MASKED_SPGEMM_MASKED_SPGEMM_INNER_H #define MASKED_SPGEMM_MASKED_SPGEMM_INNER_H #include "inner/InnerAlgorithm.h" template<template<class, class, bool> class RowAlgorithm, bool Complemented = false, class IT, class NT, class MultiplyOperation, class AddOperation> void MaskedSpGEMM1p(const CSR<IT, NT> &A, const CSC<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M, MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) { using RowAlg = RowAlgorithm<IT, NT, Complemented>; // Calculate number of threads and init C setNumThreads(numThreads); verifyInputs(A, B, C, M); // Estimate work IT *workPerRow = my_malloc<IT>(M.rows, false); IT work = calculateWork(A, B, M, workPerRow, numThreads); // Calculate cumulative work IT *cumulativeWork = my_malloc<IT>(M.rows, false); exclusiveScan(workPerRow, M.rows, cumulativeWork, numThreads); // Allocate memory for row sizes IT *rowNvals = my_malloc<IT>(M.rows, false); IT *threadsNvals = my_malloc<IT>(numThreads, false); // Allocate temporary memory for C's column IDs and values IT *colIds = my_malloc<IT>(M.nnz, false); NT *values = my_malloc<NT>(M.nnz, false); #pragma omp parallel num_threads(numThreads) { int thisThread = omp_get_thread_num(); RowAlg alg; // Distribute work auto[rowBeginIdx, rowEndIdx] = distributeWork(work, cumulativeWork, A.rows, numThreads, thisThread); // Get arrays for local colIDs and values IT *const colIdsLocal = colIds + M.rowptr[rowBeginIdx]; NT *const valuesLocal = values + M.rowptr[rowBeginIdx]; IT *currColId = colIdsLocal; NT *currValue = valuesLocal; // Numeric phase for (IT row = rowBeginIdx; row < rowEndIdx; ++row) { if (workPerRow[row] == 0) { rowNvals[row] = 0; } auto rowColIdBegin = currColId; alg.numericRow(A, B, M, multop, addop, row, currColId, currValue); rowNvals[row] = currColId - rowColIdBegin; } threadsNvals[thisThread] = currColId - colIdsLocal; #pragma omp barrier #pragma omp master { initC(A, B, C, threadsNvals, numThreads); } #pragma omp barrier setRowOffsets(C, 
threadsNvals, rowBeginIdx, rowEndIdx, rowNvals, numThreads, thisThread); copyValuesToC(C, rowBeginIdx, colIdsLocal, valuesLocal, threadsNvals[thisThread]); } my_free(workPerRow, cumulativeWork, rowNvals, threadsNvals, colIds, values); } template<template<class, class, bool> class RowAlgorithm, bool Complemented = false, class IT, class NT, class MultiplyOperation, class AddOperation> void MaskedSpGEMM2p(const CSR<IT, NT> &A, const CSC<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M, MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) { using RowAlg = RowAlgorithm<IT, NT, Complemented>; // Calculate number of threads and init C setNumThreads(numThreads); verifyInputs(A, B, C, M); // Estimate work IT *workPerRow = my_malloc<IT>(M.rows, false); IT work = calculateWork(A, B, M, workPerRow, numThreads); // Calculate cumulative work IT *cumulativeWork = my_malloc<IT>(M.rows, false); exclusiveScan(workPerRow, M.rows, cumulativeWork, numThreads); // Allocate memory for row sizes IT *rowNvals = my_malloc<IT>(M.rows, false); IT *threadsNvals = my_malloc<IT>(numThreads, false); #pragma omp parallel num_threads(numThreads) { int thisThread = omp_get_thread_num(); RowAlg alg; // Distribute work auto[rowBeginIdx, rowEndIdx] = distributeWork(work, cumulativeWork, A.rows, numThreads, thisThread); // Symbolic phase IT nvals = 0; for (IT row = rowBeginIdx; row < rowEndIdx; ++row) { if (workPerRow[row] == 0) { rowNvals[row] = 0; continue; } alg.symbolicRow(A, B, M, row, rowNvals); nvals += rowNvals[row]; } threadsNvals[thisThread] = nvals; // init C #pragma omp barrier #pragma omp master { initC(A, B, C, threadsNvals, numThreads); } #pragma omp barrier setRowOffsets(C, threadsNvals, rowBeginIdx, rowEndIdx, rowNvals, numThreads, thisThread); // Numeric phase IT *currColId = &C.colids[C.rowptr[rowBeginIdx]]; NT *currValue = &C.values[C.rowptr[rowBeginIdx]]; for (IT row = rowBeginIdx; row < rowEndIdx; ++row) { if (workPerRow[row] == 0) { rowNvals[row] = 0; continue; 
} auto rowColIdBegin = currColId; alg.numericRow(A, B, M, multop, addop, row, currColId, currValue); rowNvals[row] = currColId - rowColIdBegin; } } my_free(workPerRow, cumulativeWork, rowNvals, threadsNvals); } #endif //MASKED_SPGEMM_MASKED_SPGEMM_INNER_H
Tutorial.h
//================================================================================================= /*! // \file blaze/Tutorial.h // \brief Tutorial of the Blaze library // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_TUTORIAL_H_ #define _BLAZE_TUTORIAL_H_ //================================================================================================= // // BLAZE TUTORIAL // //================================================================================================= //**Mainpage*************************************************************************************** /*!\mainpage // // \image html blaze300x150.jpg // // This is the API for the \b Blaze high performance C++ math library. It gives a complete // overview of the individual features and sublibraries of \b Blaze. To get a first impression // on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards, // the following long tutorial covers the most important aspects of the \b Blaze math library. // The tabs at the top of the page allow a direct access to the individual modules, namespaces, // classes, and files of the \b Blaze library.\n\n // // \section table_of_content Table of Contents // // <ul> // <li> \ref configuration_and_installation </li> // <li> \ref getting_started </li> // <li> \ref vectors // <ul> // <li> \ref vector_types </li> // <li> \ref vector_operations </li> // </ul> // </li> // <li> \ref matrices // <ul> // <li> \ref matrix_types </li> // <li> \ref matrix_operations </li> // </ul> // </li> // <li> \ref adaptors // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices </li> // </ul> // </li> // <li> \ref views // <ul> // <li> \ref views_subvectors </li> // <li> \ref views_element_selections </li> // <li> \ref views_submatrices </li> // <li> \ref views_rows </li> // <li> \ref views_row_selections </li> // <li> \ref views_columns </li> // <li> \ref views_column_selections </li> // <li> \ref views_bands </li> // </ul> // </li> // <li> \ref arithmetic_operations // 
<ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication // <ul> // <li> \ref schur_product </li> // <li> \ref matrix_product </li> // </ul> // </li> // </ul> // </li> // <li> \ref shared_memory_parallelization // <ul> // <li> \ref openmp_parallelization </li> // <li> \ref cpp_threads_parallelization </li> // <li> \ref boost_threads_parallelization </li> // <li> \ref hpx_parallelization </li> // <li> \ref serial_execution </li> // </ul> // </li> // <li> \ref serialization // <ul> // <li> \ref vector_serialization </li> // <li> \ref matrix_serialization </li> // </ul> // </li> // <li> \ref customization // <ul> // <li> \ref configuration_files </li> // <li> \ref vector_and_matrix_customization // <ul> // <li> \ref custom_data_members </li> // <li> \ref custom_operations </li> // <li> \ref custom_data_types </li> // </ul> // </li> // <li> \ref error_reporting_customization </li> // </ul> // </li> // <li> \ref blas_functions </li> // <li> \ref lapack_functions </li> // <li> \ref block_vectors_and_matrices </li> // <li> \ref intra_statement_optimization </li> // <li> \ref faq </li> // <li> \ref issue_creation_guidelines </li> // <li> \ref blaze_references </li> // </ul> */ //************************************************************************************************* //**Configuration and Installation***************************************************************** /*!\page configuration_and_installation Configuration and Installation // // \tableofcontents // // // Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system // is a fairly 
easy two step process. In the following, this two step process is explained in // detail, preceded only by a short summary of the requirements. // // // \n \section requirements Requirements // <hr> // // For maximum performance the \b Blaze library expects you to have a BLAS library installed // (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>, // <a href="http://developer.amd.com/libraries/acml/">ACML</a>, // <a href="http://math-atlas.sourceforge.net">Atlas</a>, // <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't // have a BLAS library installed on your system, \b Blaze will still work and will not be reduced // in functionality, but performance may be limited. Thus it is strongly recommended to install a // BLAS library. // // Additionally, for computing the determinant of a dense matrix, for the decomposition of dense // matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular // values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either // of these features is used it is necessary to link the LAPACK library to the final executable. // If no LAPACK library is available the use of these features will result in a linker error. // // Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this // case the Boost library is required to be installed on your system. It is recommended to use the // newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If // you don't have Boost installed on your system, you can download it for free from // <a href="http://www.boost.org">www.boost.org</a>. // // // \n \section step_1_installation Step 1: Installation // <hr> // // \subsection step_1_cmake Installation via CMake // // The first step is the installation of the \b Blaze header files. 
The most convenient way // to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the // following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to // the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to // \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake. \code cmake -DCMAKE_INSTALL_PREFIX=/usr/local/ sudo make install \endcode // Windows users can do the same via the cmake-gui. Alternatively, it is possible to include // \b Blaze by adding the following lines in any \c CMakeLists.txt file: \code find_package( blaze ) if( blaze_FOUND ) add_library( blaze_target INTERFACE ) target_link_libraries( blaze_target INTERFACE blaze::blaze ) endif() \endcode // \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool // // An alternate way to install \b Blaze for Windows users is Microsoft's // <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can // be installed via the command line: \code C:\src\vcpkg> .\vcpkg install blaze \endcode // The tool automatically downloads the latest \b Blaze release and copies the header files to // the common include directory. Please note that since \b Blaze is a header-only library the // attempt to install any static or dynamic library will fail! // // \n \subsection step_1_installation_unix Manual Installation on Linux/macOS // // Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply // copied to a standard include directory (note that this requires root privileges): \code cp -r ./blaze /usr/local/include \endcode // Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the // \c CPLUS_INCLUDE_PATH environment variable can be set. 
The specified directory will be // searched after any directories specified on the command line with the option \c -I and // before the standard default directories (such as \c /usr/local/include and \c /usr/include). // Assuming a user named 'Jon', the environment variable can be set as follows: \code CPLUS_INCLUDE_PATH=/usr/home/jon/blaze export CPLUS_INCLUDE_PATH \endcode // Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the // command line. The following example demonstrates this by means of the GNU C++ compiler: \code g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp \endcode // \n \subsection step_1_installation_windows Manual Installation on Windows // // Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be // copied to any other directory or simply left in the default \b Blaze directory. However, the // chosen include directory has to be explicitly specified as include path. In Visual Studio, // this is done via the project property pages, configuration properties, C/C++, General settings. // Here the additional include directories can be specified. // // // \n \section step_2_configuration Step 2: Configuration // <hr> // // The second step is the configuration and customization of the \b Blaze library. Many aspects // of \b Blaze can be adapted to specific requirements, environments and architectures. The most // convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt> // subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header // files can be customized manually. In both cases, however, the files are modified. If this is // not an option it is possible to configure \b Blaze via the command line (see the tutorial // section \ref configuration_files or the documentation in the configuration files). // // Since the default settings are reasonable for most systems this step can also be skipped. 
// However, in order to achieve maximum performance a customization of at least the following
// configuration files is required:
//
//  - <b><tt><blaze/config/BLAS.h></tt></b>: Via this configuration file \b Blaze can be enabled
//    to use a third-party BLAS library for several basic linear algebra functions (such as for
//    instance dense matrix multiplications). In case no BLAS library is used, all linear algebra
//    functions use the default implementations of the \b Blaze library and therefore BLAS is not a
//    requirement for the compilation process. However, please note that performance may be limited.
//  - <b><tt><blaze/config/CacheSize.h></tt></b>: This file contains the hardware specific cache
//    settings. \b Blaze uses this information to optimize its cache usage. For maximum performance
//    it is recommended to adapt these settings to a specific target architecture.
//  - <b><tt><blaze/config/Thresholds.h></tt></b>: This file contains all thresholds for the
//    customization of the \b Blaze compute kernels. In order to tune the kernels for a specific
//    architecture and to maximize performance it can be necessary to adjust the thresholds,
//    especially for a parallel execution (see \ref shared_memory_parallelization).
//
// For an overview of other customization options and more details, please see the section
// \ref configuration_files.
//
//
// \n \section blaze_version Blaze Version
// <hr>
//
// The current major and minor version number of the \b Blaze library can be found in the
// <b><tt><blaze/system/Version.h></tt></b> header file. It is automatically included via the
// <b><tt><blaze/Blaze.h></tt></b> header file.
The file contains the two following macros, // which can for instance be used for conditional compilation: \code #define BLAZE_MAJOR_VERSION 3 #define BLAZE_MINOR_VERSION 2 \endcode // \n Next: \ref getting_started */ //************************************************************************************************* //**Getting Started******************************************************************************** /*!\page getting_started Getting Started // // This short tutorial serves the purpose to give a quick overview of the way mathematical // expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following // long tutorial covers the most important aspects of the \b Blaze math library. // // // \n \section getting_started_vector_example A First Example // // \b Blaze is written such that using mathematical expressions is as close to mathematical // textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly // easiest solution is the right solution and most users experience no problems when trying to // use \b Blaze in the most natural way. The following example gives a first impression of the // formulation of a vector addition in \b Blaze: \code #include <iostream> #include <blaze/Math.h> using blaze::StaticVector; using blaze::DynamicVector; // Instantiation of a static 3D column vector. The vector is directly initialized as // ( 4 -2 5 ) StaticVector<int,3UL> a{ 4, -2, 5 }; // Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to // ( 2 5 -3 ) DynamicVector<int> b( 3UL ); b[0] = 2; b[1] = 5; b[2] = -3; // Adding the vectors a and b DynamicVector<int> c = a + b; // Printing the result of the vector addition std::cout << "c =\n" << c << "\n"; \endcode // Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header // file. 
Alternatively, the entire \b Blaze library, including both the math and the entire // utility module, can be included via the \c blaze/Blaze.h header file. Also note that all // classes and functions of \b Blaze are contained in the blaze namespace.\n\n // // Assuming that this program resides in a source file called \c FirstExample.cpp, it can be // compiled for instance via the GNU C++ compiler: \code g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp \endcode // Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum // performance, it is necessary to compile the program in release mode, which deactivates // all debugging functionality inside \b Blaze. It is also strongly recommended to specify // the available architecture specific instruction set (as for instance the AVX instruction // set, which if available can be activated via the \c -mavx flag). This allows \b Blaze // to optimize computations via vectorization.\n\n // // When running the resulting executable \c FirstExample, the output of the last line of // this small program is \code c = 6 3 2 \endcode // \n \section getting_started_matrix_example An Example Involving Matrices // // Similarly easy and intuitive are expressions involving matrices: \code #include <blaze/Math.h> using namespace blaze; // Instantiating a dynamic 3D column vector DynamicVector<int> x{ 4, -1, 3 }; // Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call // operator three values of the matrix are explicitly set to get the matrix // ( 1 0 4 ) // ( 0 -2 0 ) DynamicMatrix<int> A( 2UL, 3UL, 0 ); A(0,0) = 1; A(0,2) = 4; A(1,1) = -2; // Performing a matrix/vector multiplication DynamicVector<int> y = A * x; // Printing the resulting vector std::cout << "y =\n" << y << "\n"; // Instantiating a static column-major matrix. 
The matrix is directly initialized as // ( 3 -1 ) // ( 0 2 ) // ( -1 0 ) StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } }; // Performing a matrix/matrix multiplication DynamicMatrix<int> C = A * B; // Printing the resulting matrix std::cout << "C =\n" << C << "\n"; \endcode // The output of this program is \code y = 16 2 C = ( -1 -1 ) ( 0 -4 ) \endcode // \n \section getting_started_complex_example A Complex Example // // The following example is much more sophisticated. It shows the implementation of the Conjugate // Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the // \b Blaze library: // // \image html cg.jpg // // In this example it is not important to understand the CG algorithm itself, but to see the // advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a // sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$ // unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical // formulation and therefore has huge advantages in terms of readability and maintainability, // while the performance of the code is close to the expected theoretical peak performance: \code const size_t NN( N*N ); blaze::CompressedMatrix<double,rowMajor> A( NN, NN ); blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN ); double alpha, beta, delta; // ... Initializing the sparse matrix A // Performing the CG algorithm r = b - A * x; p = r; delta = (r,r); for( size_t iteration=0UL; iteration<iterations; ++iteration ) { Ap = A * p; alpha = delta / (p,Ap); x += alpha * p; r -= alpha * Ap; beta = (r,r); if( std::sqrt( beta ) < 1E-8 ) break; p = r + ( beta / delta ) * p; delta = beta; } \endcode // \n Hopefully this short tutorial gives a good first impression of how mathematical expressions // are formulated with \b Blaze. 
The following long tutorial, starting with \ref vector_types, // will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and // matrix types, all possible operations on vectors and matrices, and of course all possible // mathematical expressions. // // \n Previous: \ref configuration_and_installation &nbsp; &nbsp; Next: \ref vectors */ //************************************************************************************************* //**Vectors**************************************************************************************** /*!\page vectors Vectors // // \tableofcontents // // // \n \section vectors_general General Concepts // <hr> // // The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector, // \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector) // and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified // as either column vectors or row vectors: \code using blaze::DynamicVector; using blaze::columnVector; using blaze::rowVector; // Setup of the 3-dimensional dense column vector // // ( 1 ) // ( 2 ) // ( 3 ) // DynamicVector<int,columnVector> a{ 1, 2, 3 }; // Setup of the 3-dimensional dense row vector // // ( 4 5 6 ) // DynamicVector<int,rowVector> b{ 4, 5, 6 }; \endcode // Per default, all vectors in \b Blaze are column vectors: \code // Instantiation of a 3-dimensional column vector blaze::DynamicVector<int> c( 3UL ); \endcode // \n \section vectors_details Vector Details // <hr> // // - \ref vector_types // - \ref vector_operations // // // \n \section vectors_examples Examples // <hr> \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowVector; using blaze::columnVector; StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector 
DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector // ... Resizing and initialization c = a + trans( b ); \endcode // \n Previous: \ref getting_started &nbsp; &nbsp; Next: \ref vector_types */ //************************************************************************************************* //**Vector Types*********************************************************************************** /*!\page vector_types Vector Types // // \tableofcontents // // // \n \section vector_types_static_vector StaticVector // <hr> // // The blaze::StaticVector class template is the representation of a fixed size vector with // statically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/StaticVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class StaticVector; \endcode // - \c Type: specifies the type of the vector elements. StaticVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the total number of vector elements. It is expected that StaticVector is // only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. 
// // The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at // compile time: \code // Definition of a 3-dimensional integral column vector blaze::StaticVector<int,3UL> a; // Definition of a 4-dimensional single precision column vector blaze::StaticVector<float,4UL,blaze::columnVector> b; // Definition of a 6-dimensional double precision row vector blaze::StaticVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_dynamic_vector DynamicVector // <hr> // // The blaze::DynamicVector class template is the representation of an arbitrary sized vector // with dynamically allocated elements of arbitrary type. It can be included via the header file \code #include <blaze/math/DynamicVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class DynamicVector; \endcode // - \c Type: specifies the type of the vector elements. DynamicVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best // choice for medium to large vectors. Its size can be modified at runtime: \code // Definition of a 3-dimensional integral column vector blaze::DynamicVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector blaze::DynamicVector<float,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 blaze::DynamicVector<double,blaze::rowVector> c; \endcode // \n \section vector_types_hybrid_vector HybridVector // <hr> // // The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and // the blaze::DynamicVector class templates. 
It represents a fixed size vector with statically // allocated elements, but still can be dynamically resized (within the bounds of the available // memory). It can be included via the header file \code #include <blaze/math/HybridVector.h> \endcode // The type of the elements, the number of elements, and the transpose flag of the vector can // be specified via the three template parameters: \code template< typename Type, size_t N, bool TF > class HybridVector; \endcode // - \c Type: specifies the type of the vector elements. HybridVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c N : specifies the maximum number of vector elements. It is expected that HybridVector // is only used for tiny and small vectors. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not // known at compile time or not fixed at runtime, but whose maximum size is known at compile // time: \code // Definition of a 3-dimensional integral column vector with a maximum size of 6 blaze::HybridVector<int,6UL> a( 3UL ); // Definition of a 4-dimensional single precision column vector with a maximum size of 16 blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL ); // Definition of a double precision row vector with size 0 and a maximum size of 6 blaze::HybridVector<double,6UL,blaze::rowVector> c; \endcode // \n \section vector_types_custom_vector CustomVector // <hr> // // The blaze::CustomVector class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data // structure. 
Thus in contrast to all other dense vector types a custom vector does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of elements
// during construction. A custom vector can therefore be considered an alias to the existing
// array. It can be included via the header file

   \code
   #include <blaze/math/CustomVector.h>
   \endcode

// The type of the elements, the properties of the given array of elements and the transpose
// flag of the vector can be specified via the following four template parameters:

   \code
   template< typename Type, bool AF, bool PF, bool TF >
   class CustomVector;
   \endcode

//  - Type: specifies the type of the vector elements. blaze::CustomVector can be used with
//          any non-cv-qualified, non-reference, non-pointer element type.
//  - AF  : specifies whether the represented, external arrays are properly aligned with
//          respect to the available instruction set (SSE, AVX, ...) or not.
//  - PF  : specifies whether the represented, external arrays are properly padded with
//          respect to the available instruction set (SSE, AVX, ...) or not.
//  - TF  : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
//          vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
// // The blaze::CustomVector is the right choice if any external array needs to be represented as // a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomVector<int,unaligned,unpadded,columnVector>; std::vector<int> vec( 7UL ); UnalignedUnpadded a( &vec[0], 7UL ); // Definition of a managed custom column vector for unaligned but padded 'float' arrays using UnalignedPadded = CustomVector<float,unaligned,padded,columnVector>; std::unique_ptr<float[]> memory1( new float[16] ); UnalignedPadded b( memory1.get(), 9UL, 16UL ); // Definition of a managed custom row vector for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomVector<double,aligned,unpadded,rowVector>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 7UL ) ); AlignedUnpadded c( memory2.get(), 7UL ); // Definition of a managed custom row vector for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomVector<cplx,aligned,padded,columnVector>; std::unique_ptr<cplx[],Deallocate> memory3( allocate<cplx>( 8UL ) ); AlignedPadded d( memory3.get(), 5UL, 8UL ); \endcode // In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several // special characteristics. All of these result from the fact that a custom vector is not // performing any kind of memory allocation, but instead is given an existing array of elements. 
// The following sections discuss all of these characteristics: // // -# <b>\ref vector_types_custom_vector_memory_management</b> // -# <b>\ref vector_types_custom_vector_copy_operations</b> // -# <b>\ref vector_types_custom_vector_alignment</b> // -# <b>\ref vector_types_custom_vector_padding</b> // // \n \subsection vector_types_custom_vector_memory_management Memory Management // // The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // vector data structure. However, this flexibility comes with the price that the user of a custom // vector is responsible for the resource management. // // The following examples give an impression of several possible types of custom vectors: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3-dimensional custom vector with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom vector! std::vector<int> vec( 3UL ); CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL ); // Definition of a custom vector with size 3 and capacity 16 with aligned, padded and // externally managed integer array. Note that the std::unique_ptr must be guaranteed // to outlive the custom vector! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) ); CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL ); \endcode // \n \subsection vector_types_custom_vector_copy_operations Copy Operations // // As with all dense vectors it is possible to copy construct a custom vector: \code using blaze::CustomVector; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomVector<int,unaligned,unpadded>; std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10 CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector a[1] = 20; // Also modifies the std::vector CustomType b( a ); // Creating a copy of vector a b[2] = 20; // Also affects vector a and the std::vector \endcode // It is important to note that a custom vector acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom vector that is referencing and representing // the same array as the original custom vector. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom vector, but modifies the values of the array: \code std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4 CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector a = c; // Copy assignment: Set all values of vector a and b to 4. \endcode // \n \subsection vector_types_custom_vector_alignment Alignment // // In case the custom vector is specified as \c aligned the passed array must be guaranteed to // be aligned according to the requirements of the used instruction set (SSE, AVX, ...). 
For // instance, if AVX is active an array of integers must be 32-byte aligned: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) ); CustomVector<int,aligned,unpadded> a( memory.get(), 5UL ); \endcode // In case the alignment requirements are violated, a \c std::invalid_argument exception is // thrown. // // \n \subsection vector_types_custom_vector_padding Padding // // Adding padding elements to the end of an array can have a significant impact on the performance. // For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors // of double precision values can be added via a single SIMD addition operation: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomVector<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) ); // Creating padded custom vectors of size 3 and a capacity of 4 CustomType a( memory1.get(), 3UL, 4UL ); CustomType b( memory2.get(), 3UL, 4UL ); CustomType c( memory3.get(), 3UL, 4UL ); // ... Initialization c = a + b; // AVX-based vector addition \endcode // In this example, maximum performance is possible. 
However, in case no padding elements are // inserted, a scalar addition has to be used: \code using blaze::CustomVector; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomVector<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) ); // Creating unpadded custom vectors of size 3 CustomType a( memory1.get(), 3UL ); CustomType b( memory2.get(), 3UL ); CustomType c( memory3.get(), 3UL ); // ... Initialization c = a + b; // Scalar vector addition \endcode // Note the different number of constructor parameters for unpadded and padded custom vectors: // In contrast to unpadded vectors, where during the construction only the size of the array // has to be specified, during the construction of a padded custom vector it is additionally // necessary to explicitly specify the capacity of the array. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom vector the added padding elements must // guarantee that the capacity is greater than or equal to the size and a multiple of the SIMD vector // width. In case of unaligned padded vectors the number of padding elements can be greater than or // equal to the number of padding elements of an aligned padded custom vector. In case the padding // is insufficient with respect to the available instruction set, a \c std::invalid_argument // exception is thrown. // // Please also note that \b Blaze will zero initialize the padding elements in order to achieve // maximum performance! 
// // // \n \section vector_types_compressed_vector CompressedVector // <hr> // // The blaze::CompressedVector class is the representation of an arbitrarily sized sparse // vector, which stores only non-zero elements of arbitrary type. It can be included via the // header file \code #include <blaze/math/CompressedVector.h> \endcode // The type of the elements and the transpose flag of the vector can be specified via the two // template parameters: \code template< typename Type, bool TF > class CompressedVector; \endcode // - \c Type: specifies the type of the vector elements. CompressedVector can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column // vector (\c blaze::columnVector). The default value is \c blaze::columnVector. // // The blaze::CompressedVector is the right choice for all kinds of sparse vectors: \code // Definition of a 3-dimensional integral column vector blaze::CompressedVector<int> a( 3UL ); // Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL ); // Definition of a double precision row vector with size 0 blaze::CompressedVector<double,blaze::rowVector> c; \endcode // \n Previous: \ref vectors &nbsp; &nbsp; Next: \ref vector_operations */ //************************************************************************************************* //**Vector Operations****************************************************************************** /*!\page vector_operations Vector Operations // // \tableofcontents // // // \n \section vector_operations_constructors Constructors // <hr> // // Instantiating and setting up a vector is very easy and intuitive. However, there are a few // rules to take care of: // - In case the last template parameter (the transpose flag) is omitted, the vector is per // default a column vector. 
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection vector_operations_default_construction Default Construction \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::CompressedVector; // All vectors can be default constructed. Whereas the size // of StaticVectors is fixed via the second template parameter, // the initial size of a default constructed DynamicVector or // CompressedVector is 0. StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector. // All elements are initialized to 0. StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector. // Again, all elements are initialized to 0L. DynamicVector<float> v3; // Instantiation of a dynamic single precision column // vector of size 0. DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row // vector of size 0. CompressedVector<int> v5; // Instantiation of a compressed integer column // vector of size 0. CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row // vector of size 0. \endcode // \n \subsection vector_operations_size_construction Construction with Specific Size // // The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that // allows to immediately give the vector the required size. Whereas both dense vectors (i.e. // \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector // elements, \c CompressedVector merely acquires the size but remains empty. 
\code DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector // of size 9. The elements are NOT initialized! HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single // precision complex values. The elements are // default constructed. CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with // size 10. Initially, the vector provides no // capacity for non-zero elements. \endcode // \n \subsection vector_operations_initialization_constructors Initialization Constructors // // All dense vector classes offer a constructor that allows for a direct, homogeneous initialization // of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements // can be specified \code StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector. // All elements are initialized to 2. DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision // column vector of size 3. All elements are // set to 7.0F. CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column // vector of size 15, which provides enough // space for at least 3 non-zero elements. \endcode // \n \subsection vector_operations_array_construction Array Construction // // Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic // or static array. If the vector is initialized from a dynamic array, the constructor expects the // actual size of the array as first argument, the array as second argument. In case of a static // array, the fixed size of the array is used: \code const unique_ptr<double[]> array1( new double[2] ); // ... 
Initialization of the dynamic array blaze::StaticVector<double,2UL> v13( 2UL, array1.get() ); int array2[4] = { 4, -5, -6, 7 }; blaze::StaticVector<int,4UL> v14( array2 ); \endcode // \n \subsection vector_operations_initializer_list_construction Initializer List Construction // // In addition, all dense and sparse vector classes can be directly initialized by means of an // initializer list: \code blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F }; blaze::CompressedVector<int> v16{ 0, 2, 0, 0, 5, 0, 7, 0 }; \endcode // In case of sparse vectors, only the non-zero elements are used to initialize the vector. // // \n \subsection vector_operations_copy_construction Copy Construction // // All dense and sparse vectors can be created as the copy of any other dense or sparse vector // with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector). \code StaticVector<int,9UL,columnVector> v17( v7 ); // Instantiation of the dense column vector v17 // as copy of the dense column vector v7. DynamicVector<int,rowVector> v18( v9 ); // Instantiation of the dense row vector v18 as // copy of the sparse row vector v9. CompressedVector<int,columnVector> v19( v1 ); // Instantiation of the sparse column vector v19 // as copy of the dense column vector v1. CompressedVector<float,rowVector> v20( v12 ); // Instantiation of the sparse row vector v20 as // copy of the row vector v12. \endcode // Note that it is not possible to create a \c StaticVector as a copy of a vector with a different // size: \code StaticVector<int,5UL,columnVector> v21( v7 ); // Runtime error: Size does not match! StaticVector<int,4UL,rowVector> v22( v10 ); // Compile time error: Size does not match! 
\endcode // \n \section vector_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse vectors: // \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment, // \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment. // // \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment // // Sometimes it may be necessary to assign the same value to all elements of a dense vector. // For this purpose, the assignment operator can be used: \code blaze::StaticVector<int,3UL> v1; blaze::DynamicVector<double> v2; // Setting all integer elements of the StaticVector to 2 v1 = 2; // Setting all double precision elements of the DynamicVector to 5.0 v2 = 5.0; \endcode // \n \subsection vector_operations_array_assignment Array Assignment // // Dense vectors can also be assigned a static array: \code blaze::StaticVector<float,2UL> v1; blaze::DynamicVector<double,rowVector> v2; float array1[2] = { 1.0F, 2.0F }; double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 }; v1 = array1; v2 = array2; \endcode // \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // vector: \code blaze::DynamicVector<float> v1; blaze::CompressedVector<double,rowVector> v2; v1 = { 1.0F, 2.0F }; v2 = { 2.1, 0.0, -1.7, 0.0, -7.2 }; \endcode // In case of sparse vectors, only the non-zero elements are considered. // // \n \subsection vector_operations_copy_assignment Copy Assignment // // For all vector types it is generally possible to assign another vector with the same transpose // flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the // assigned vector is required to have the same size as the \c StaticVector since the size of a // \c StaticVector cannot be adapted! 
\code blaze::StaticVector<int,3UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 3UL ); blaze::DynamicVector<float,columnVector> v3( 5UL ); blaze::CompressedVector<int,columnVector> v4( 3UL ); blaze::CompressedVector<float,rowVector> v5( 3UL ); // ... Initialization of the vectors v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector v1 = v5; // Compilation error: Cannot assign a row vector to a column vector \endcode // \n \subsection vector_operations_compound_assignment Compound Assignment // // Next to plain assignment, it is also possible to use addition assignment, subtraction // assignment, and multiplication assignment. Note however, that in contrast to plain assignment // the size and the transpose flag of the vectors has to be equal in order to be able to perform // a compound assignment. \code blaze::StaticVector<int,5UL,columnVector> v1; blaze::DynamicVector<int,columnVector> v2( 5UL ); blaze::CompressedVector<float,columnVector> v3( 7UL ); blaze::DynamicVector<float,rowVector> v4( 7UL ); blaze::CompressedVector<float,rowVector> v5( 7UL ); // ... Initialization of the vectors v1 += v2; // OK: Addition assignment between two column vectors of the same size v1 += v3; // Runtime error: No compound assignment between vectors of different size v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size \endcode // \n \section vector_operations_element_access Element Access // <hr> // // \n \subsection vector_operations_subscript_operator_1 Subscript Operator // // The easiest and most intuitive way to access a dense or sparse vector is via the subscript // operator. 
The indices to access a vector are zero-based: \code blaze::DynamicVector<int> v1( 5UL ); v1[0] = 1; v1[1] = 3; // ... blaze::CompressedVector<float> v2( 5UL ); v2[2] = 7.3F; v2[4] = -1.4F; \endcode // Whereas using the subscript operator on a dense vector only accesses the already existing // element, accessing an element of a sparse vector via the subscript operator potentially // inserts the element into the vector and may therefore be more expensive. Consider the // following example: \code blaze::CompressedVector<int> v1( 10UL ); for( size_t i=0UL; i<v1.size(); ++i ) { ... = v1[i]; } \endcode // Although the compressed vector is only used for read access within the for loop, using the // subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore the // preferred way to traverse the non-zero elements of a sparse vector is to use iterators. // // \n \subsection vector_operations_iterators Iterators // // All vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end(), and \c cend() functions to traverse the currently contained elements by iterators. // In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a // manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or // \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedVector; CompressedVector<int> v1( 10UL ); // ... Initialization of the vector // Traversing the vector by Iterator for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. 
} // Traversing the vector by ConstIterator for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) { // ... } for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) { // ... } \endcode // \n \section vector_operations_element_insertion Element Insertion // <hr> // // In contrast to dense vectors, that store all elements independent of their value and that // offer direct access to all elements, sparse vectors only store the non-zero elements contained // in the vector. Therefore it is necessary to explicitly add elements to the vector. // // \n \subsection vector_operations_subscript_operator_2 Subscript Operator // // The first option to add elements to a sparse vector is the subscript operator: \code using blaze::CompressedVector; CompressedVector<int> v1( 3UL ); v1[1] = 2; \endcode // In case the element at the given index is not yet contained in the vector, it is automatically // inserted. Otherwise the old value is replaced by the new value 2. The operator returns a // reference to the sparse vector element. 
// // \n \subsection vector_operations_set .set() // // An alternative to the subscript operator is the \c set() function: In case the element is not // yet contained in the vector the element is inserted, else the element's value is modified: \code // Insert or modify the value at index 3 v1.set( 3, 1 ); \endcode // \n \subsection vector_operations_insert .insert() // // The insertion of elements can be better controlled via the \c insert() function. In contrast to // the subscript operator and the \c set() function it emits an exception in case the element is // already contained in the vector. In order to check for this case, the \c find() function can be // used: \code // In case the element at index 4 is not yet contained in the vector it is inserted // with a value of 6. if( v1.find( 4 ) == v1.end() ) v1.insert( 4, 6 ); \endcode // \n \subsection vector_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not suited // for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill // a sparse vector is the \c append() function. It requires the sparse vector to provide enough // capacity to insert a new element. Additionally, the index of the new element must be larger // than the index of the previous element. Violating these conditions results in undefined // behavior! \code v1.reserve( 10 ); // Reserving space for 10 non-zero elements v1.append( 5, -2 ); // Appending the element -2 at index 5 v1.append( 6, 4 ); // Appending the element 4 at index 6 // ... \endcode // \n \section vector_operations_element_removal Element Removal // <hr> // // \subsection vector_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse vector. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedVector; CompressedVector<int> v( 42 ); // ... 
Initialization of the vector // Erasing the element at index 21 v.erase( 21 ); // Erasing a single element via iterator v.erase( v.find( 4 ) ); // Erasing all non-zero elements in the range [7..24] v.erase( v.lowerBound( 7 ), v.upperBound( 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate v.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] with a value larger than 5 v.erase( v.lowerBound( 30 ), v.upperBound( 40 ), []( int i ){ return i > 5; } ); \endcode // \n \section vector_operations_element_lookup Element Lookup // <hr> // // A sparse vector only stores the non-zero elements contained in the vector. Therefore, whenever // accessing a vector element at a specific index a lookup operation is required. Whereas the // subscript operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection vector_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in a sparse // vector. It specifically searches for the element at the given index. In case the element is // found, the function returns an iterator to the element. Otherwise an iterator just past the // last non-zero element of the compressed vector (the \c end() iterator) is returned. Note that // the returned iterator is subject to invalidation due to inserting operations via the subscript // operator, the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the element at index 7. In case the element is not // contained in the vector, the end() iterator is returned. CompressedVector<int>::Iterator pos( a.find( 7 ) ); if( pos != a.end() ) { // ... 
} \endcode // \n \subsection vector_operations_lowerbound .lowerBound() // // The \c lowerBound() function returns an iterator to the first element with an index not less // than the given index. In combination with the \c upperBound() function this function can be // used to create a pair of iterators specifying a range of indices. Note that the returned // iterator is subject to invalidation due to inserting operations via the subscript operator, // the \c set() function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17. CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // \n \subsection vector_operations_upperbound .upperBound() // // The \c upperBound() function returns an iterator to the first element with an index greater than // the given index. In combination with the \c lowerBound() function this function can be used to // create a pair of iterators specifying a range of indices. Note that the returned iterator is // subject to invalidation due to inserting operations via the subscript operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedVector; CompressedVector<int> a( 42 ); // ... Initialization of the vector // Searching the lower bound of index 17. 
CompressedVector<int>::Iterator pos1( a.lowerBound( 17 ) ); // Searching the upper bound of index 28 CompressedVector<int>::Iterator pos2( a.upperBound( 28 ) ); // Erasing all elements in the specified range a.erase( pos1, pos2 ); \endcode // \n \section vector_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection vector_operations_size .size() // // Via the \c size() member function, the current size of a dense or sparse vector can be queried: \code // Instantiating a dynamic vector with size 10 blaze::DynamicVector<int> v1( 10UL ); v1.size(); // Returns 10 // Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements blaze::CompressedVector<double> v2( 12UL, 3UL ); v2.size(); // Returns 12 \endcode // Alternatively, the free function \c size() can be used to query the current size of a vector. // In contrast to the member function, the free function can also be used to query the size of // vector expressions: \code size( v1 ); // Returns 10, i.e. has the same effect as the member function size( v2 ); // Returns 12, i.e. has the same effect as the member function blaze::DynamicMatrix<int> A( 15UL, 12UL ); size( A * v2 ); // Returns 15, i.e. the size of the resulting vector \endcode // \n \subsection vector_operations_capacity .capacity() // // Via the \c capacity() (member) function the internal capacity of a dense or sparse vector // can be queried. Note that the capacity of a vector doesn't have to be equal to the size // of a vector. In case of a dense vector the capacity will always be greater or equal than // the size of the vector, in case of a sparse vector the capacity may even be less than // the size. \code v1.capacity(); // Returns at least 10 \endcode // For symmetry reasons, there is also a free function \c capacity() available that can be used // to query the capacity: \code capacity( v1 ); // Returns at least 10, i.e. 
has the same effect as the member function \endcode // Note, however, that it is not possible to query the capacity of a vector expression: \code capacity( A * v1 ); // Compilation error! \endcode // \n \subsection vector_operations_nonzeros .nonZeros() // // For both dense and sparse vectors the number of non-zero elements can be determined via the // \c nonZeros() member function. Sparse vectors directly return their number of non-zero // elements, dense vectors traverse their elements and count the number of non-zero elements. \code v1.nonZeros(); // Returns the number of non-zero elements in the dense vector v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector \endcode // There is also a free function \c nonZeros() available to query the current number of non-zero // elements: \code nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in // a vector expression. However, the result is not the exact number of non-zero elements, but // may be a rough estimation: \code nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression \endcode // \n \subsection vector_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse vector for not-a-number // elements: \code blaze::DynamicVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization if( isnan( a ) ) { ... } \endcode // If at least one element of the vector is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for vectors with floating point // elements. 
The attempt to use it for a vector with a non-floating point element type results in // a compile time error. // // // \n \subsection vector_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse vector is in default state: \code blaze::HybridVector<int,20UL> a; // ... Resizing and initialization if( isDefault( a ) ) { ... } \endcode // A vector is in default state if it appears to just have been default constructed. All resizable // vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are // in default state if its size is equal to zero. A non-resizable vector (\c StaticVector, all // subvectors, element selections, rows, and columns) is in default state if all its elements are // in default state. For instance, in case the vector is instantiated for a built-in integral or // floating point data type, the function returns \c true in case all vector elements are 0 and // \c false in case any vector element is not 0. // // // \n \subsection vector_operations_isUniform isUniform() // // In order to check if all vector elements are identical, the \c isUniform function can be used: \code blaze::DynamicVector<int> a; // ... Resizing and initialization if( isUniform( a ) ) { ... } \endcode // Note that in case of sparse vectors also the zero elements are also taken into account! // // // \n \subsection vector_operations_length length() / sqrLength() // // In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length() // and \c sqrLength() function can be used: \code blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F }; const float len = length ( v ); // Computes the current length of the vector const float sqrlen = sqrLength( v ); // Computes the square length of the vector \endcode // Note that both functions can only be used for vectors with built-in or complex element type! 
// // // \n \subsection vector_operations_vector_trans trans() // // As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors // (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However, // vectors can be transposed via the \c trans() function: \code blaze::DynamicVector<int,columnVector> v1( 4UL ); blaze::CompressedVector<int,rowVector> v2( 4UL ); v1 = v2; // Compilation error: Cannot assign a row vector to a column vector v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it // to the column vector v1 v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2 v1 += trans( v2 ); // OK: Addition assignment of two column vectors \endcode // \n \subsection vector_operations_ctrans ctrans() // // It is also possible to compute the conjugate transpose of a vector. This operation is available // via the \c ctrans() function: \code blaze::CompressedVector< complex<float>, rowVector > v1( 4UL ); blaze::DynamicVector< complex<float>, columnVector > v2( 4UL ); v1 = ctrans( v2 ); // Compute the conjugate transpose vector \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector \endcode // \n \subsection vector_operations_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given vector expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a dense and a sparse vector: \code using blaze::DynamicVector; using blaze::CompressedVector; blaze::DynamicVector<double> a; blaze::CompressedVector<double> b; // ... 
Resizing and initialization auto c = evaluate( a * b ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary vector is created and no copy operation is performed. Instead, the result // is directly written to the target vector due to the return value optimization (RVO). However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code CompressedVector<double> d( a * b ); // No temporary & no copy operation DynamicVector<double> e( a * b ); // Temporary & copy operation d = evaluate( a * b ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicVector<double> a, b, c, d; d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector d = a + eval( b * c ); // No creation of a temporary vector \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \section vector_operations_modifying_operations Modifying Operations // <hr> // // \subsection vector_operations_resize_reserve .resize() / .reserve() // // The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector // cannot be resized. 
In contrast, the size of \c DynamicVectors, \c HybridVectors as well as // \c CompressedVectors can be changed via the \c resize() function: \code using blaze::DynamicVector; using blaze::CompressedVector; DynamicVector<int,columnVector> v1; CompressedVector<int,rowVector> v2( 4 ); v2[1] = -2; v2[3] = 11; // Adapting the size of the dynamic and compressed vectors. The (optional) second parameter // specifies whether the existing elements should be preserved. Per default, the existing // elements are preserved. v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain // uninitialized, elements of class type are default constructed. v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the // new elements are NOT initialized! v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved. v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost. \endcode // Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors) // on the vector: \code blaze::DynamicVector<int,rowVector> v1( 10UL ); // Creating a dynamic vector of size 10 auto sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6] v1.resize( 6UL ); // Resizing the vector invalidates the view \endcode // When the internal capacity of a vector is no longer sufficient, the allocation of a larger // chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicVector<int> v1; v1.reserve( 100 ); v1.size(); // Returns 0 v1.capacity(); // Returns at least 100 \endcode // Note that the size of the vector remains unchanged, but only the internal capacity is set // according to the specified value!
// // \n \subsection vector_operations_shrinkToFit .shrinkToFit() // // The internal capacity of vectors with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers v1.resize( 10UL ); // Resize to 10, but the capacity is preserved v1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c size(). Please // also note that in case a reallocation occurs, all iterators (including \c end() iterators), all // pointers and references to elements of the vector are invalidated. // // \subsection vector_operations_reset_clear reset() / clear() // // In order to reset all elements of a vector, the \c reset() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with 2.0F. blaze::DynamicVector<float> v1( 3UL, 2.0F ); // Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged. reset( v1 ); // Resetting all elements v1.size(); // Returns 3: size and capacity remain unchanged \endcode // In order to return a vector to its default state (i.e. the state of a default constructed // vector), the \c clear() function can be used: \code // Setup of a single precision column vector, whose elements are initialized with -1.0F. blaze::DynamicVector<float> v1( 5, -1.0F ); // Resetting the entire vector. clear( v1 ); // Resetting the entire vector v1.size(); // Returns 0: size is reset, but capacity remains unchanged \endcode // Note that resetting or clearing both dense and sparse vectors does not change the capacity // of the vectors. 
// // // \n \subsection vector_operations_swap swap() // // Via the \c swap() function it is possible to completely swap the contents of two vectors of // the same type: \code blaze::DynamicVector<int,columnVector> v1( 10UL ); blaze::DynamicVector<int,columnVector> v2( 20UL ); swap( v1, v2 ); // Swapping the contents of v1 and v2 \endcode // \n \section vector_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection vector_operations_normalize normalize() // // The \c normalize() function can be used to scale any non-zero vector to a length of 1. In // case the vector does not contain a single non-zero element (i.e. is a zero vector), the // \c normalize() function returns a zero vector. \code blaze::DynamicVector<float,columnVector> v1( 10UL ); blaze::CompressedVector<double,columnVector> v2( 12UL ); v1 = normalize( v1 ); // Normalizing the dense vector v1 length( v1 ); // Returns 1 (or 0 in case of a zero vector) v1 = normalize( v2 ); // Assigning v1 the normalized vector v2 length( v1 ); // Returns 1 (or 0 in case of a zero vector) \endcode // Note that the \c normalize() function only works for floating point vectors. The attempt to // use it for an integral vector results in a compile time error. // // // \n \subsection vector_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single vector or multiple vectors. If // passed a single vector, the functions return the smallest and largest element of the given // dense vector or the smallest and largest non-zero element of the given sparse vector, // respectively: \code blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 }; min( a ); // Returns -5 max( a ); // Returns 7 \endcode \code blaze::CompressedVector<int> b{ 1, 0, 3, 0 }; min( b ); // Returns 1 max( b ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref vector_operations_reduction_operations section. 
// // If passed two or more dense vectors, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given vectors, respectively: \code blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 }; blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 }; min( a, c ); // Results in the vector ( -5, 1, -7, -4 ) max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 ) \endcode // Please note that sparse vectors can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a vector expression: \code min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector min( a + c, c - d ); // Results in ( -10 -2 -7 0 ) max( a - c, c + d ); // Results in ( 0 4 14 6 ) \endcode // \n \subsection vector_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense vector can be computed via \c softmax(). // The resulting dense vector consists of real values in the range (0..1], which add up to 1. \code blaze::StaticVector<double,7UL,rowVector> x{ 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0 }; blaze::StaticVector<double,7UL,rowVector> y; // Evaluating the softmax function y = softmax( x ); // Results in ( 0.024 0.064 0.175 0.475 0.024 0.064 0.175 ) double s = sum( y ); // Results in 1 \endcode // \n \subsection vector_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a vector. 
// For instance, the following computation \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 }; blaze::StaticVector<int,3UL,rowVector> b( abs( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} 1 \\ 2 \\ 3 \\ \end{array}\right)\f$ // \n \subsection vector_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a vector \a a. For // each element \c i the corresponding result is 1 if \a a[i] is greater than zero, 0 if \a a[i] // is zero, and -1 if \a a[i] is less than zero. For instance, the following use of the \c sign() // function \code blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, 0 }; blaze::StaticVector<int,3UL,rowVector> b( sign( a ) ); \endcode // results in the vector \f$ b = \left(\begin{array}{*{1}{c}} -1 \\ 1 \\ 0 \\ \end{array}\right)\f$ // \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a vector, respectively: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = floor( a ); // Rounding down each element of the vector b = ceil ( a ); // Rounding up each element of the vector b = trunc( a ); // Truncating each element of the vector b = round( a ); // Rounding each element of the vector \endcode // \n \subsection vector_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse vector to compute the complex // conjugate of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Computing the vector of complex conjugates // ( (-2, 1) ) // ( ( 1,-1) ) StaticVector<cplx,2UL> b; b = conj( a ); \endcode // Additionally, vectors can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicVector<cplx> c( 5UL ); 
conjugate( c ); // In-place conjugate operation. c = conj( c ); // Same as above \endcode // \n \subsection vector_operators_real real() // // The \c real() function can be used on a dense or sparse vector to extract the real part of // each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the real part of each vector element // ( -2 ) // ( 1 ) StaticVector<double,2UL> b; b = real( a ); \endcode // \n \subsection vector_operators_imag imag() // // The \c imag() function can be used on a dense or sparse vector to extract the imaginary part // of each element of the vector: \code using blaze::StaticVector; using cplx = std::complex<double>; // Creating the vector // ( (-2,-1) ) // ( ( 1, 1) ) StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) }; // Extracting the imaginary part of each vector element // ( -1 ) // ( 1 ) StaticVector<double,2UL> b; b = imag( a ); \endcode // \n \subsection vector_operations_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // vector can be computed: \code blaze::DynamicVector<double> a, b, c; b = sqrt( a ); // Computes the square root of each element c = invsqrt( a ); // Computes the inverse square root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root // of each element of a vector: \code blaze::HybridVector<double,3UL> a, b, c; b = cbrt( a ); // Computes the cubic root of each element c = invcbrt( a ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account!
// // // \n \subsection vector_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenuse for a pair of // dense vectors: \code blaze::StaticVector<double,3UL> a, b, c; c = hypot( a, b ); // Computes the componentwise hypotenuse \endcode // \n \subsection vector_operations_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a vector to a specific range: \code blaze::DynamicVector<double> a, b; b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a vector. // If passed a vector and a numeric exponent, the function computes the exponential value of each // element of the vector using the same exponent. If passed a second vector, the function computes // the componentwise exponential value: \code blaze::StaticVector<double,3UL> a, b, c; c = pow( a, 1.2 ); // Computes the exponential value of each element c = pow( a, b ); // Computes the componentwise exponential value \endcode // \n \subsection vector_operations_exp exp() / exp2() / exp10() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // vector, respectively: \code blaze::DynamicVector<double> a, b; b = exp( a ); // Computes the base e exponential of each element b = exp2( a ); // Computes the base 2 exponential of each element b = exp10( a ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account!
// // // \n \subsection vector_operations_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a vector: \code blaze::StaticVector<double,3UL> a, b; b = log( a ); // Computes the natural logarithm of each element b = log2( a ); // Computes the binary logarithm of each element b = log10( a ); // Computes the common logarithm of each element \endcode // \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sin( a ); // Computes the sine of each element of the vector b = cos( a ); // Computes the cosine of each element of the vector b = tan( a ); // Computes the tangent of each element of the vector b = asin( a ); // Computes the inverse sine of each element of the vector b = acos( a ); // Computes the inverse cosine of each element of the vector b = atan( a ); // Computes the inverse tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! 
// // // \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse vectors: \code blaze::DynamicVector<double> a, b; b = sinh( a ); // Computes the hyperbolic sine of each element of the vector b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense vectors: \code blaze::DynamicVector<double> a, b, c; c = atan2( a, b ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection vector_operations_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a vector: \code blaze::StaticVector<double,3UL,rowVector> a, b; b = erf( a ); // Computes the error function of each element b = erfc( a ); // Computes the complementary error function of each element \endcode // Note that in case of sparse vectors only the non-zero elements are taken into account! // // // \n \subsection vector_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse vector. 
For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicVector<double> a, b; b = map( a, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors. The following example demonstrates the merging of two vectors of double // precision values into a vector of double precision complex numbers: \code blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 }; blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 }; blaze::DynamicVector< complex<double> > cplx; // Creating the vector // ( ( 2.1, 0.3) ) // ( (-4.2, 1.4) ) // ( ( 1.0, 2.9) ) // ( ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. // // // \n \section vector_operations_reduction_operations Reduction Operations // <hr> // // \subsection vector_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs a total reduction of the elements of the given dense vector // or the non-zero elements of the given sparse vector. The following examples demonstrate the // total reduction of a dense and sparse vector: \code blaze::DynamicVector<double> a; // ...
Resizing and initialization const double totalsum1 = reduce( a, blaze::Add() ); const double totalsum2 = reduce( a, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedVector<double> a; // ... Resizing and initialization const double totalmin1 = reduce( a, blaze::Min() ); const double totalmin2 = reduce( a, []( double a, double b ){ return blaze::min( a, b ); } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. // // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection vector_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of addition: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalsum = sum( a ); // Results in 10 \endcode // Please note that the evaluation order of the \c sum() function is unspecified. 
// // \n \subsection vector_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of multiplication: \code blaze::DynamicVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode \code blaze::CompressedVector<int> a{ 1, 2, 3, 4 }; const int totalprod = prod( a ); // Results in 24 \endcode // \n \subsection vector_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense vector or the // smallest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). \code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmin = min( a ); // Results in -2 \endcode \code blaze::CompressedVector<int> a{ 1, 0, 3, 0 }; const int totalmin = min( a ); // Results in 1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the minimum of the vector is 1. // // \n \subsection vector_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense vector or the // largest non-zero element of the given sparse vector. It can only be used for element types // that support the smaller-than relationship. In case the given vector currently has a size // of 0, the returned value is the default value (e.g. 0 in case of fundamental data types). 
\code blaze::DynamicVector<int> a{ 1, -2, 3, 0 }; const int totalmax = max( a ); // Results in 3 \endcode \code blaze::CompressedVector<int> a{ -1, 0, -3, 0 }; const int totalmax = max( a ); // Results in -1 \endcode // \note In case the sparse vector is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed vector has only 2 non-zero elements. // However, the maximum of the vector is -1. // // // \n \section vector_operations_norms Norms // <hr> // // \subsection vector_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = norm( a ); \endcode // \n \subsection vector_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = sqrNorm( a ); \endcode // \n \subsection vector_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the L1 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l1 = l1Norm( a ); \endcode // \n \subsection vector_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the L2 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l2 = l2Norm( a ); \endcode // \n \subsection vector_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the L3 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ...
Resizing and initialization const double l3 = l3Norm( a ); \endcode // \n \subsection vector_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the L4 norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double l4 = l4Norm( a ); \endcode // \n \subsection vector_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse vector, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double lp1 = lpNorm<2>( a ); // Compile time argument const double lp2 = lpNorm( a, 2.3 ); // Runtime argument \endcode // \n \subsection vector_operations_norms_maxnorm maxNorm() // // The \c maxNorm() function computes the maximum norm of the given dense or sparse vector: \code blaze::DynamicVector<double> a; // ... Resizing and initialization const double max = maxNorm( a ); \endcode // \n Previous: \ref vector_types &nbsp; &nbsp; Next: \ref matrices */ //************************************************************************************************* //**Matrices*************************************************************************************** /*!\page matrices Matrices // // \tableofcontents // // // \n \section matrices_general General Concepts // <hr> // // The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix, // \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix) // and one sparse matrix type (\ref matrix_types_compressed_matrix).
All matrices can either be // stored as row-major matrices or column-major matrices: \code using blaze::DynamicMatrix; using blaze::rowMajor; using blaze::columnMajor; // Setup of the 2x3 row-major dense matrix // // ( 1 2 3 ) // ( 4 5 6 ) // DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 }, { 4, 5, 6 } }; // Setup of the 3x2 column-major dense matrix // // ( 1 4 ) // ( 2 5 ) // ( 3 6 ) // DynamicMatrix<int,columnMajor> B{ { 1, 4 }, { 2, 5 }, { 3, 6 } }; \endcode // Per default, all matrices in \b Blaze are row-major matrices: \code // Instantiation of a 3x3 row-major matrix blaze::DynamicMatrix<int> C( 3UL, 3UL ); \endcode // \n \section matrices_details Matrix Details // <hr> // // - \ref matrix_types // - \ref matrix_operations // // // \n \section matrices_examples Examples // <hr> \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix // ... Resizing and initialization C = A * B; \endcode // \n Previous: \ref vector_operations &nbsp; &nbsp; Next: \ref matrix_types */ //************************************************************************************************* //**Matrix Types*********************************************************************************** /*!\page matrix_types Matrix Types // // \tableofcontents // // // \n \section matrix_types_static_matrix StaticMatrix // <hr> // // The blaze::StaticMatrix class template is the representation of a fixed size matrix with // statically allocated elements of arbitrary type. 
It can be included via the header file \code #include <blaze/math/StaticMatrix.h> \endcode // The type of the elements, the number of rows and columns, and the storage order of the matrix // can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class StaticMatrix; \endcode // - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any // non-cv-qualified, non-reference element type. // - \c M : specifies the total number of rows of the matrix. // - \c N : specifies the total number of columns of the matrix. Note that it is expected // that StaticMatrix is only used for tiny and small matrices. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are // known at compile time: \code // Definition of a 3x4 integral row-major matrix blaze::StaticMatrix<int,3UL,4UL> A; // Definition of a 4x6 single precision row-major matrix blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B; // Definition of a 6x4 double precision column-major matrix blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_dynamic_matrix DynamicMatrix // <hr> // // The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix // with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/DynamicMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class DynamicMatrix; \endcode // - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any // non-cv-qualified, non-reference element type. 
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best // choice for medium to large matrices. The number of rows and columns can be modified at runtime: \code // Definition of a 3x4 integral row-major matrix blaze::DynamicMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::DynamicMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_hybrid_matrix HybridMatrix // <hr> // // The HybridMatrix class template combines the flexibility of a dynamically sized matrix with // the efficiency and performance of a fixed size matrix. It is implemented as a crossing between // the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static // matrix it uses static stack memory instead of dynamically allocated memory and similar to the // dynamic matrix it can be resized (within the extend of the static memory). It can be included // via the header file \code #include <blaze/math/HybridMatrix.h> \endcode // The type of the elements, the maximum number of rows and columns and the storage order of the // matrix can be specified via the four template parameters: \code template< typename Type, size_t M, size_t N, bool SO > class HybridMatrix; \endcode // - Type: specifies the type of the matrix elements. HybridMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. // - M : specifies the maximum number of rows of the matrix. // - N : specifies the maximum number of columns of the matrix. Note that it is expected // that HybridMatrix is only used for tiny and small matrices. 
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions // are not known at compile time or not fixed at runtime, but whose maximum dimensions are known // at compile time: \code // Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8 blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16 blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6 blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C; \endcode // \n \section matrix_types_custom_matrix CustomMatrix // <hr> // // The blaze::CustomMatrix class template provides the functionality to represent an external // array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data // structure. Thus in contrast to all other dense matrix types a custom matrix does not perform // any kind of memory allocation by itself, but it is provided with an existing array of element // during construction. A custom matrix can therefore be considered an alias to the existing // array. It can be included via the header file \code #include <blaze/math/CustomMatrix.h> \endcode // The type of the elements, the properties of the given array of elements and the storage order // of the matrix can be specified via the following four template parameters: \code template< typename Type, bool AF, bool PF, bool SO > class CustomMatrix; \endcode // - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. 
// - AF : specifies whether the represented, external arrays are properly aligned with // respect to the available instruction set (SSE, AVX, ...) or not. // - PF : specifies whether the represented, external arrays are properly padded with // respect to the available instruction set (SSE, AVX, ...) or not. // - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CustomMatrix is the right choice if any external array needs to be represented as // a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be // realized: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays using UnalignedUnpadded = CustomMatrix<int,unaligned,unpadded,rowMajor>; std::vector<int> vec( 12UL ); UnalignedUnpadded A( &vec[0], 3UL, 4UL ); // Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays using UnalignedPadded = CustomMatrix<float,unaligned,padded,columnMajor>; std::unique_ptr<float[]> memory1( new float[40] ); UnalignedPadded B( memory1.get(), 5UL, 6UL, 8UL ); // Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays using AlignedUnpadded = CustomMatrix<double,aligned,unpadded,rowMajor>; std::unique_ptr<double[],Deallocate> memory2( blaze::allocate<double>( 192UL ) ); AlignedUnpadded C( memory2.get(), 12UL, 13UL, 16UL ); // Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays using cplx = complex<double>; using AlignedPadded = CustomMatrix<cplx,aligned,padded,columnMajor>; std::unique_ptr<cplx[],Deallocate> memory3( blaze::allocate<cplx>( 112UL ) ); AlignedPadded D( memory3.get(), 7UL, 14UL, 16UL ); \endcode // In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several 
// special characteristics. All of these result from the fact that a custom matrix is not // performing any kind of memory allocation, but instead is given an existing array of elements. // The following sections discuss all of these characteristics: // // -# <b>\ref matrix_types_custom_matrix_memory_management</b> // -# <b>\ref matrix_types_custom_matrix_copy_operations</b> // -# <b>\ref matrix_types_custom_matrix_alignment</b> // -# <b>\ref matrix_types_custom_matrix_padding</b> // // \n \subsection matrix_types_custom_matrix_memory_management Memory Management // // The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As // such it provides everything that is required to use the array just like a native \b Blaze dense // matrix data structure. However, this flexibility comes with the price that the user of a custom // matrix is responsible for the resource management. // // The following examples give an impression of several possible types of custom matrices: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unaligned; using blaze::padded; using blaze::unpadded; // Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally // managed integer array. Note that the std::vector must be guaranteed to outlive the // custom matrix! std::vector<int> vec( 12UL ); CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL ); // Definition of a custom 8x12 matrix for an aligned and padded integer array of // capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr // must be guaranteed to outlive the custom matrix! 
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) ); CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL ); \endcode // \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations // // As with all dense matrices it is possible to copy construct a custom matrix: \code using blaze::CustomMatrix; using blaze::unaligned; using blaze::unpadded; using CustomType = CustomMatrix<int,unaligned,unpadded>; std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10 CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A(0,1) = 20; // Also modifies the std::vector CustomType B( A ); // Creating a copy of matrix A B(0,2) = 20; // Also affects matrix A and the std::vector \endcode // It is important to note that a custom matrix acts as a reference to the specified array. Thus // the result of the copy constructor is a new custom matrix that is referencing and representing // the same array as the original custom matrix. // // In contrast to copy construction, just as with references, copy assignment does not change // which array is referenced by the custom matrices, but modifies the values of the array: \code std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4 CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix A = C; // Copy assignment: Set all values of matrix A and B to 4. \endcode // \n \subsection matrix_types_custom_matrix_alignment Alignment // // In case the custom matrix is specified as \c aligned the passed array must adhere to some // alignment restrictions based on the alignment requirements of the used data type and the // used instruction set (SSE, AVX, ...). The restriction applies to the first element of each // row/column: In case of a row-major matrix the first element of each row must be properly // aligned, in case of a column-major matrix the first element of each column must be properly // aligned. 
For instance, if a row-major matrix is used and AVX is active the first element of // each row must be 32-byte aligned: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using blaze::rowMajor; // Allocation of 32-byte aligned memory std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) ); CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL ); \endcode // In the example, the row-major matrix has six columns. However, since with AVX eight integer // values are loaded together the matrix is padded with two additional elements. This guarantees // that the first element of each row is 32-byte aligned. In case the alignment requirements are // violated, a \c std::invalid_argument exception is thrown. // // \n \subsection matrix_types_custom_matrix_padding Padding // // Adding padding elements to the end of each row/column can have a significant impact on the // performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double // precision matrices can be added via three SIMD addition operations: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::padded; using CustomType = CustomMatrix<double,aligned,padded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) ); // Creating padded custom 3x3 matrix with an additional padding element in each row CustomType A( memory1.get(), 3UL, 3UL, 4UL ); CustomType B( memory2.get(), 3UL, 3UL, 4UL ); CustomType C( memory3.get(), 3UL, 3UL, 4UL ); // ... Initialization C = A + B; // AVX-based matrix addition \endcode // In this example, maximum performance is possible. 
However, in case no padding elements are // inserted a scalar addition has to be used: \code using blaze::CustomMatrix; using blaze::Deallocate; using blaze::allocate; using blaze::aligned; using blaze::unpadded; using CustomType = CustomMatrix<double,aligned,unpadded>; std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) ); std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) ); // Creating unpadded custom 3x3 matrix CustomType A( memory1.get(), 3UL, 3UL ); CustomType B( memory2.get(), 3UL, 3UL ); CustomType C( memory3.get(), 3UL, 3UL ); // ... Initialization C = A + B; // Scalar matrix addition \endcode // Note that the construction of padded and unpadded aligned matrices looks identical. However, // in case of padded matrices, \b Blaze will zero initialize the padding elements and use them // in all computations in order to achieve maximum performance. In case of an unpadded matrix // \b Blaze will ignore the elements with the downside that it is not possible to load a complete // row to an AVX register, which makes it necessary to fall back to a scalar addition. // // The number of padding elements is required to be sufficient with respect to the available // instruction set: In case of an aligned padded custom matrix the added padding elements must // guarantee that the total number of elements in each row/column is a multiple of the SIMD // vector width. In case of an unaligned padded matrix the number of padding elements can be // greater or equal the number of padding elements of an aligned padded custom matrix. In case // the padding is insufficient with respect to the available instruction set, a // \c std::invalid_argument exception is thrown. 
// // // \n \section matrix_types_compressed_matrix CompressedMatrix // <hr> // // The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse // matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be // included via the header file \code #include <blaze/math/CompressedMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class CompressedMatrix; \endcode // - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with // any non-cv-qualified, non-reference, non-pointer element type. // - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices: \code // Definition of a 3x4 integral row-major matrix blaze::CompressedMatrix<int> A( 3UL, 4UL ); // Definition of a 4x6 single precision row-major matrix blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL ); // Definition of a double precision column-major matrix with 0 rows and columns blaze::CompressedMatrix<double,blaze::columnMajor> C; \endcode // \n \section matrix_types_identity_matrix IdentityMatrix // <hr> // // The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary // sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included // via the header file \code #include <blaze/math/IdentityMatrix.h> \endcode // The type of the elements and the storage order of the matrix can be specified via the two // template parameters: \code template< typename Type, bool SO > class IdentityMatrix; \endcode // - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any // non-cv-qualified, non-reference, non-pointer element type. 
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix. // The default value is blaze::rowMajor. // // The blaze::IdentityMatrix is the perfect choice to represent an identity matrix: \code // Definition of a 3x3 integral row-major identity matrix blaze::IdentityMatrix<int> A( 3UL ); // Definition of a 6x6 single precision row-major identity matrix blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL ); // Definition of a double precision column-major identity matrix with 0 rows and columns blaze::IdentityMatrix<double,blaze::columnMajor> C; \endcode // \n Previous: \ref matrices &nbsp; &nbsp; Next: \ref matrix_operations */ //************************************************************************************************* //**Matrix Operations****************************************************************************** /*!\page matrix_operations Matrix Operations // // \tableofcontents // // // \n \section matrix_operations_constructors Constructors // <hr> // // Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules // to be aware of: // - In case the last template parameter (the storage order) is omitted, the matrix is per // default stored in row-major order. // - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in // data types are initialized to 0, class types are initialized via the default constructor). // - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized // if they are of built-in type and are default constructed if they are of class type. // // \n \subsection matrix_operations_default_construction Default Construction \code using blaze::StaticMatrix; using blaze::DynamicMatrix; using blaze::CompressedMatrix; // All matrices can be default constructed. 
Whereas the size of // a StaticMatrix is fixed via the second and third template // parameter, the initial size of a constructed DynamicMatrix // or CompressedMatrix is 0. StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major // matrix. All elements are initialized to 0. DynamicMatrix<float> M2; // Instantiation of a single precision dynamic // row-major matrix with 0 rows and 0 columns. DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic // column-major matrix with 0 rows and 0 columns. CompressedMatrix<int> M4; // Instantiation of a compressed integer // row-major matrix of size 0x0. CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision // column-major matrix of size 0x0. \endcode // \n \subsection matrix_operations_size_construction Construction with Specific Size // // The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor // that allows to immediately give the matrices a specific number of rows and columns: \code DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major // matrix. The elements are not initialized. HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major // matrix. The elements are not initialized. CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed // column-major matrix. \endcode // Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately // allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this // example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory. // // // \n \subsection matrix_operations_initialization_constructors Initialization Constructors // // All dense matrix classes offer a constructor for a direct, homogeneous initialization of all // matrix elements. 
In contrast, for sparse matrices the predicted number of non-zero elements // can be specified. \code StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major // matrix. All elements are initialized to 7. DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major // matrix. All elements are initialized to 2.0F. CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major // matrix with capacity for 4 non-zero elements. \endcode // \n \subsection matrix_operations_array_construction Array Construction // // Alternatively, all dense matrix classes offer a constructor for an initialization with a // dynamic or static array. If the matrix is initialized from a dynamic array, the constructor // expects the dimensions of values provided by the array as first and second argument, the // array as third argument. In case of a static array, the fixed size of the array is used: \code const std::unique_ptr<double[]> array1( new double[6] ); // ... Initialization of the dynamic array blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() ); int array2[2][2] = { { 4, -5 }, { -6, 7 } }; blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 ); \endcode // \n \subsection matrix_operations_initializer_list_construction // // In addition, all dense and sparse matrix classes can be directly initialized by means of an // initializer list: \code blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F }, { -0.9F, -1.2F }, { 4.8F, 0.6F } }; blaze::CompressedMatrix<int,rowMajor> M15{ { 3 }, { 1 }, { 0, 2 } }; \endcode // In case of sparse matrices, only the non-zero elements are used to initialize the matrix. // Missing values are considered to be default values. // // \n \subsection matrix_operations_copy_construction Copy Construction // // All dense and sparse matrices can be created as a copy of another dense or sparse matrix. 
\code StaticMatrix<int,5UL,4UL,rowMajor> M16( M6 ); // Instantiation of the dense row-major matrix M16 // as copy of the dense row-major matrix M6. DynamicMatrix<float,columnMajor> M17( M8 ); // Instantiation of the dense column-major matrix M17 // as copy of the sparse column-major matrix M8. CompressedMatrix<double,columnMajor> M18( M7 ); // Instantiation of the compressed column-major matrix // M18 as copy of the dense row-major matrix M7. CompressedMatrix<float,rowMajor> M19( M8 ); // Instantiation of the compressed row-major matrix // M19 as copy of the compressed column-major matrix M8. \endcode // Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different // number of rows and/or columns: \code StaticMatrix<int,4UL,5UL,rowMajor> M20( M6 ); // Runtime error: Number of rows and columns // does not match! StaticMatrix<int,4UL,4UL,columnMajor> M21( M9 ); // Compile time error: Number of columns does // not match! \endcode // \n \section matrix_operations_assignment Assignment // <hr> // // There are several types of assignment to dense and sparse matrices: // \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment, // \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment. // // // \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment // // It is possible to assign the same value to all elements of a dense matrix. 
All dense matrix // classes provide an according assignment operator: \code blaze::StaticMatrix<int,3UL,2UL> M1; blaze::DynamicMatrix<double> M2; // Setting all integer elements of the StaticMatrix to 4 M1 = 4; // Setting all double precision elements of the DynamicMatrix to 3.5 M2 = 3.5; \endcode // \n \subsection matrix_operations_array_assignment Array Assignment // // Dense matrices can also be assigned a static array: \code blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1; blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2; blaze::DynamicMatrix<double> M3; int array1[2][2] = { { 1, 2 }, { 3, 4 } }; double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M1 = array1; M2 = array1; M3 = array2; \endcode // Note that the dimensions of the static array have to match the size of a \c StaticMatrix, // whereas a \c DynamicMatrix is resized according to the array dimensions: \f$ M3 = \left(\begin{array}{*{2}{c}} 3.1 & 6.4 \\ -0.9 & -1.2 \\ 4.8 & 0.6 \\ \end{array}\right)\f$ // \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment // // Alternatively, it is possible to directly assign an initializer list to a dense or sparse // matrix: \code blaze::DynamicMatrix<double> M1; blaze::CompressedMatrix<int> M2; M1 = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } }; M2 = { { 1, 0 }, {}, { 0, 1 }, { 2 } }; \endcode // In case of sparse matrices, only the non-zero elements are considered. Missing values are // considered to be default values. // // \n \subsection matrix_operations_copy_assignment Copy Assignment // // All kinds of matrices can be assigned to each other. The only restriction is that since a // \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of // rows and in the number of columns. 
\code blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL ); blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL ); blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL ); blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL ); // ... Initialization of the matrices M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix \endcode // \n \subsection matrix_operations_compound_assignment Compound Assignment // // Compound assignment is also available for matrices: addition assignment, subtraction assignment, // and multiplication assignment. In contrast to plain assignment, however, the number of rows // and columns of the two operands have to match according to the arithmetic operation. \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1; blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL ); blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL ); blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL ); blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5; blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL ); // ... 
Initialization of the matrices M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions M1 -= M3; // OK: Subtraction assignment between a row-major and a column-major matrix M1 += M4; // Runtime error: No compound assignment between matrices of different size M1 -= M5; // Compilation error: No compound assignment between matrices of different size M2 *= M6; // OK: Multiplication assignment between two row-major matrices \endcode // Note that the multiplication assignment potentially changes the number of columns of the // target matrix: \f$\left(\begin{array}{*{3}{c}} 2 & 0 & 1 \\ 0 & 3 & 2 \\ \end{array}\right) \times \left(\begin{array}{*{2}{c}} 4 & 0 \\ 1 & 0 \\ 0 & 3 \\ \end{array}\right) = \left(\begin{array}{*{2}{c}} 8 & 3 \\ 3 & 6 \\ \end{array}\right)\f$ // Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a // multiplication assignment with other square matrices of the same dimensions. // // // \n \section matrix_operations_element_access Element Access // <hr> // // \n \subsection matrix_operations_function_call_operator_1 Function Call Operator // // The easiest way to access a specific dense or sparse matrix element is via the function call // operator. The indices to access a matrix are zero-based: \code blaze::DynamicMatrix<int> M1( 4UL, 6UL ); M1(0,0) = 1; M1(0,1) = 3; // ... blaze::CompressedMatrix<double> M2( 5UL, 3UL ); M2(0,2) = 4.1; M2(1,1) = -6.3; \endcode // Since dense matrices allocate enough memory for all contained elements, using the function // call operator on a dense matrix directly returns a reference to the accessed value. In case // of a sparse matrix, if the accessed value is currently not contained in the matrix, the // value is inserted into the matrix prior to returning a reference to the value, which can // be much more expensive than the direct access to a dense matrix. 
Consider the following // example: \code blaze::CompressedMatrix<int> M1( 4UL, 4UL ); for( size_t i=0UL; i<M1.rows(); ++i ) { for( size_t j=0UL; j<M1.columns(); ++j ) { ... = M1(i,j); } } \endcode // Although the compressed matrix is only used for read access within the for loop, using the // function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore // the preferred way to traverse the non-zero elements of a sparse matrix is to use iterators. // // \n \subsection matrix_operations_iterators Iterators // // All matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(), // \c end() and \c cend() functions to traverse all contained elements by iterator. Note that // it is not possible to traverse all elements of the matrix, but that it is only possible to // traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and // \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of // a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned: \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 4UL, 6UL ); // Traversing the matrix by Iterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } // Traversing the matrix by ConstIterator for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... 
= it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the non-zero element. } } \endcode // Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions: \code for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) { // ... } } for( size_t i=0UL; i<A.rows(); ++i ) { for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) { // ... } } \endcode // \n \section matrix_operations_element_insertion Element Insertion // <hr> // // Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse // matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements // to the matrix. // // \n \subsection matrix_operations_function_call_operator_2 Function Call Operator // // The first possibility to add elements to a sparse matrix is the function call operator: \code using blaze::CompressedMatrix; CompressedMatrix<int> M1( 3UL, 4UL ); M1(1,2) = 9; \endcode // In case the element at the given position is not yet contained in the sparse matrix, it is // automatically inserted. Otherwise the old value is replaced by the new value 9. The operator // returns a reference to the sparse matrix element. // // \n \subsection matrix_operations_set .set() // // An alternative to the function call operator is the \c set() function: In case the element is // not yet contained in the matrix the element is inserted, else the element's value is modified: \code // Insert or modify the value at position (2,0) M1.set( 2, 0, 1 ); \endcode // \n \subsection matrix_operations_insert .insert() // The insertion of elements can be better controlled via the \c insert() function. 
In contrast // to the function call operator and the \c set() function it emits an exception in case the // element is already contained in the matrix. In order to check for this case, the \c find() // function can be used: \code // In case the element at position (2,3) is not yet contained in the matrix it is inserted // with a value of 4. if( M1.find( 2, 3 ) == M1.end( 2 ) ) M1.insert( 2, 3, 4 ); \endcode // \n \subsection matrix_operations_append .append() // // Although the \c insert() function is very flexible, due to performance reasons it is not // suited for the setup of large sparse matrices. A very efficient, yet also very low-level // way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to // provide enough capacity to insert a new element in the specified row/column. Additionally, // the index of the new element must be larger than the index of the previous element in the // same row/column. Violating these conditions results in undefined behavior! \code M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0 M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1 M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2 // ... 
\endcode // The most efficient way to fill a sparse matrix with elements, however, is a combination of // \c reserve(), \c append(), and the \c finalize() function: \code // Setup of the compressed row-major matrix // // ( 0 1 0 2 0 ) // A = ( 0 0 0 0 0 ) // ( 3 0 0 0 0 ) // blaze::CompressedMatrix<int> M1( 3UL, 5UL ); M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1 M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3 M1.finalize( 0 ); // Finalizing row 0 M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2 M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0 M1.finalize( 2 ); // Finalizing row 2 \endcode // \note The \c finalize() function has to be explicitly called for each row or column, even // for empty ones! // \note Although \c append() does not allocate new memory, it still invalidates all iterators // returned by the \c end() functions! // // // \n \section matrix_operations_element_removal Element Removal // <hr> // // \subsection matrix_operations_erase .erase() // // The \c erase() member functions can be used to remove elements from a sparse matrix. The // following example gives an impression of the five different flavors of \c erase(): \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... 
Initialization of the matrix // Erasing the element at position (21,23) A.erase( 21, 23 ); // Erasing a single element in row 17 via iterator A.erase( 17, A.find( 4 ) ); // Erasing all non-zero elements in the range [7..24] of row 33 A.erase( 33, A.lowerBound( 33, 7 ), A.upperBound( 33, 24 ) ); // Erasing all non-zero elements with a value larger than 9 by passing a unary predicate A.erase( []( int i ){ return i > 9; } ); // Erasing all non-zero elements in the range [30..40] of row 37 with a value larger than 5 CompressedMatrix<int,rowMajor>::Iterator pos1( A.lowerBound( 37, 30 ) ); CompressedMatrix<int,rowMajor>::Iterator pos2( A.upperBound( 37, 40 ) ); A.erase( 37, pos1, pos2, []( int i ){ return i > 5; } ); \endcode // \n \section matrix_operations_element_lookup Element Lookup // <hr> // // A sparse matrix only stores the non-zero elements contained in the matrix. Therefore, whenever // accessing a matrix element at a specific position a lookup operation is required. Whereas the // function call operator is performing this lookup automatically, it is also possible to use the // \c find(), \c lowerBound(), and \c upperBound() member functions for a manual lookup. // // \n \subsection matrix_operations_find .find() // // The \c find() function can be used to check whether a specific element is contained in the // sparse matrix. It specifically searches for the element at the specified position. In case // the element is found, the function returns an iterator to the element. Otherwise an iterator // just past the last non-zero element of the according row or column (the \c end() iterator) // is returned. Note that the returned iterator is subject to invalidation due to inserting // operations via the function call operator, the \c set() function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,rowMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the element at position (7,17). 
In case the element is not // contained in the matrix,
Note that the returned iterator is subject // to invalidation due to inserting operations via the function call operator, the \c set() // function or the \c insert() function! \code using blaze::CompressedMatrix; CompressedMatrix<int,columnMajor> A( 42, 53 ); // ... Initialization of the matrix // Searching the lower bound of row index 17 in column 9. CompressedMatrix<int,columnMajor>::Iterator pos1( A.lowerBound( 17, 9 ) ); // Searching the upper bound of row index 28 in column 9 CompressedMatrix<int,columnMajor>::Iterator pos2( A.upperBound( 28, 9 ) ); // Erasing all elements in the specified range A.erase( 9, pos1, pos2 ); \endcode // \n \section matrix_operations_non_modifying_operations Non-Modifying Operations // <hr> // // \subsection matrix_operations_rows .rows() // // The current number of rows of a matrix can be acquired via the \c rows() member function: \code // Instantiating a dynamic matrix with 10 rows and 8 columns blaze::DynamicMatrix<int> M1( 10UL, 8UL ); M1.rows(); // Returns 10 // Instantiating a compressed matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.rows(); // Returns 8 \endcode // Alternatively, the free functions \c rows() can be used to query the current number of rows of // a matrix. In contrast to the member function, the free function can also be used to query the // number of rows of a matrix expression: \code rows( M1 ); // Returns 10, i.e. has the same effect as the member function rows( M2 ); // Returns 8, i.e. has the same effect as the member function rows( M1 * M2 ); // Returns 10, i.e. 
the number of rows of the resulting matrix \endcode // \n \subsection matrix_operations_columns .columns() // // The current number of columns of a matrix can be acquired via the \c columns() member function: \code // Instantiating a dynamic matrix with 6 rows and 8 columns blaze::DynamicMatrix<int> M1( 6UL, 8UL ); M1.columns(); // Returns 8 // Instantiating a compressed matrix with 8 rows and 7 columns blaze::CompressedMatrix<double> M2( 8UL, 7UL ); M2.columns(); // Returns 7 \endcode // There is also a free function \c columns() available, which can also be used to query the number // of columns of a matrix expression: \code columns( M1 ); // Returns 8, i.e. has the same effect as the member function columns( M2 ); // Returns 7, i.e. has the same effect as the member function columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix \endcode // \subsection matrix_operations_spacing .spacing() // // The total number of elements of a row or column of a dense matrix, including potential padding // elements, can be acquired via the \c spacing member function. In case of a row-major matrix // (i.e. in case the storage order is set to blaze::rowMajor) the function returns the spacing // between two rows, in case of a column-major matrix (i.e. in case the storage flag is set to // blaze::columnMajor) the function returns the spacing between two columns: \code // Instantiating a row-major dynamic matrix with 7 rows and 8 columns blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 8UL ); M1.spacing(); // Returns the total number of elements in a row // Instantiating a column-major dynamic matrix with 8 rows and 12 columns blaze::CompressedMatrix<double> M2( 8UL, 12UL ); M2.spacing(); // Returns the total number of element in a column \endcode // Alternatively, the free functions \c spacing() can be used to query the current number of // elements in a row/column. 
\code spacing( M1 ); // Returns the total number of elements in a row spacing( M2 ); // Returns the total number of elements in a column \endcode // \n \subsection matrix_operations_capacity .capacity() // // The \c capacity() member function returns the internal capacity of a dense or sparse matrix. // Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of // a dense matrix the capacity will always be greater or equal than the total number of elements // of the matrix. In case of a sparse matrix, the capacity will usually be much less than the // total number of elements. \code blaze::DynamicMatrix<float> M1( 5UL, 7UL ); blaze::StaticMatrix<float,7UL,4UL> M2; M1.capacity(); // Returns at least 35 M2.capacity(); // Returns at least 28 \endcode // There is also a free function \c capacity() available to query the capacity. However, please // note that this function cannot be used to query the capacity of a matrix expression: \code capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function capacity( M1 * M2 ); // Compilation error! \endcode // \n \subsection matrix_operations_nonzeros .nonZeros() // // For both dense and sparse matrices the current number of non-zero elements can be queried // via the \c nonZeros() member function. In case of matrices there are two flavors of the // \c nonZeros() function: One returns the total number of non-zero elements in the matrix, // the second returns the number of non-zero elements in a specific row (in case of a row-major // matrix) or column (in case of a column-major matrix). Sparse matrices directly return their // number of non-zero elements, dense matrices traverse their elements and count the number of // non-zero elements. \code blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL ); // ... 
Initializing the dense matrix M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2 \endcode \code blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL ); // ... Initializing the sparse matrix M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3 \endcode // The free \c nonZeros() function can also be used to query the number of non-zero elements in a // matrix expression. However, the result is not the exact number of non-zero elements, but may be // a rough estimation: \code nonZeros( M1 ); // Has the same effect as the member function nonZeros( M1, 2 ); // Has the same effect as the member function nonZeros( M2 ); // Has the same effect as the member function nonZeros( M2, 3 ); // Has the same effect as the member function nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression \endcode // \n \subsection matrix_operations_isnan isnan() // // The \c isnan() function provides the means to check a dense or sparse matrix for non-a-number // elements: \code blaze::DynamicMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode \code blaze::CompressedMatrix<double> A( 3UL, 4UL ); // ... Initialization if( isnan( A ) ) { ... } \endcode // If at least one element of the matrix is not-a-number, the function returns \c true, otherwise // it returns \c false. Please note that this function only works for matrices with floating point // elements. The attempt to use it for a matrix with a non-floating point element type results in // a compile time error. // // // \n \subsection matrix_operations_isdefault isDefault() // // The \c isDefault() function returns whether the given dense or sparse matrix is in default state: \code blaze::HybridMatrix<int,5UL,4UL> A; // ... 
Resizing and initialization if( isDefault( A ) ) { ... } \endcode // A matrix is in default state if it appears to just have been default constructed. All resizable // matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in // default state if its size is equal to zero. A non-resizable matrix (\c StaticMatrix and all // submatrices) is in default state if all its elements are in default state. For instance, in case // the matrix is instantiated for a built-in integral or floating point data type, the function // returns \c true in case all matrix elements are 0 and \c false in case any matrix element is // not 0. // // // \n \subsection matrix_operations_isSquare isSquare() // // Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the // number of columns) can be checked via the \c isSquare() function: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization if( isSquare( A ) ) { ... } \endcode // \n \subsection matrix_operations_issymmetric isSymmetric() // // Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix // is symmetric: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isSymmetric( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be symmetric! // // // \n \subsection matrix_operations_isUniform isUniform() // // In order to check if all matrix elements are identical, the \c isUniform function can be used: \code blaze::DynamicMatrix<int> A; // ... Resizing and initialization if( isUniform( A ) ) { ... } \endcode // Note that in case of a sparse matrix also the zero elements are also taken into account! // // // \n \subsection matrix_operations_islower isLower() // // Via the \c isLower() function it is possible to check whether a dense or sparse matrix is // lower triangular: \code blaze::DynamicMatrix<float> A; // ... 
Resizing and initialization if( isLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower triangular! // // // \n \subsection matrix_operations_isunilower isUniLower() // // Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is // lower unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be lower unitriangular! // // // \n \subsection matrix_operations_isstrictlylower isStrictlyLower() // // Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix // is strictly lower triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyLower( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be strictly lower triangular! // // // \n \subsection matrix_operations_isUpper isUpper() // // Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is // upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper triangular! // // // \n \subsection matrix_operations_isuniupper isUniUpper() // // Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is // upper unitriangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isUniUpper( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be upper unitriangular! // // // \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper() // // Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix // is strictly upper triangular: \code blaze::DynamicMatrix<float> A; // ... Resizing and initialization if( isStrictlyUpper( A ) ) { ... 
} \endcode // Note that non-square matrices are never considered to be strictly upper triangular! // // // \n \subsection matrix_operations_isdiagonal isDiagonal() // // The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix, // i.e. if it has only elements on its diagonal and if the non-diagonal elements are default // elements: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isDiagonal( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be diagonal! // // // \n \subsection matrix_operations_isidentity isIdentity() // // The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix, // i.e. if all diagonal elements are 1 and all non-diagonal elements are 0: \code blaze::CompressedMatrix<float> A; // ... Resizing and initialization if( isIdentity( A ) ) { ... } \endcode // Note that non-square matrices are never considered to be identity matrices! // // // \n \subsection matrix_operations_matrix_determinant det() // // The determinant of a square dense matrix can be computed by means of the \c det() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization double d = det( A ); // Compute the determinant of A \endcode // In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // \note The \c det() function can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The function is depending on LAPACK kernels. Thus the function can only be used if the // fitting LAPACK library is available and linked to the executable. Otherwise a linker error // will be created. 
// // // \n \subsection matrix_operations_matrix_trans trans() // // Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into // a column-major matrix and vice versa: \code blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL ); blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL ); M1 = M2; // Assigning a column-major matrix to a row-major matrix M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1 M1 += trans( M2 ); // Addition assignment of two row-major matrices \endcode // \n \subsection matrix_operations_ctrans ctrans() // // The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian // conjugate, or transjugate) can be computed via the \c ctrans() function: \code blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL ); blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL ); M1 = ctrans( M2 ); // Compute the conjugate transpose matrix \endcode // Note that the \c ctrans() function has the same effect as manually applying the \c conj() and // \c trans() function in any order: \code M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix \endcode // \n \subsection matrix_operations_matrix_evaluate eval() / evaluate() // // The \c evaluate() function forces an evaluation of the given matrix expression and enables // an automatic deduction of the correct result type of an operation. The following code example // demonstrates its intended use for the multiplication of a lower and a strictly lower dense // matrix: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::StrictlyLowerMatrix; LowerMatrix< DynamicMatrix<double> > A; StrictlyLowerMatrix< DynamicMatrix<double> > B; // ... 
Resizing and initialization auto C = evaluate( A * B ); \endcode // In this scenario, the \c evaluate() function assists in deducing the exact result type of // the operation via the \c auto keyword. Please note that if \c evaluate() is used in this // way, no temporary matrix is created and no copy operation is performed. Instead, the result // is directly written to the target matrix due to the return value optimization (RVO). However, // if \c evaluate() is used in combination with an explicit target type, a temporary will be // created and a copy operation will be performed if the used type differs from the type // returned from the function: \code StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation DynamicMatrix<double> F( A * B ); // Temporary & copy operation D = evaluate( A * B ); // Temporary & copy operation \endcode // Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger // expression. However, please note that \c evaluate() is not intended to be used for this // purpose. This task is more elegantly and efficiently handled by the \c eval() function: \code blaze::DynamicMatrix<double> A, B, C, D; D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix D = A + eval( B * C ); // No creation of a temporary matrix \endcode // In contrast to the \c evaluate() function, \c eval() can take the complete expression // into account and therefore can guarantee the most efficient way to evaluate it (see also // \ref intra_statement_optimization). // // // \n \section matrix_operations_modifying_operations Modifying Operations // <hr> // // \subsection matrix_operations_resize_reserve .resize() / .reserve() // // The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template // parameter and a \c CustomMatrix cannot be resized. 
In contrast, the number of rows and columns
In order to avoid frequent reallocations, the \c reserve() // function can be used up front to set the internal capacity: \code blaze::DynamicMatrix<int> M1; M1.reserve( 100 ); M1.rows(); // Returns 0 M1.capacity(); // Returns at least 100 \endcode // Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or // column (for a column-major matrix): \code blaze::CompressedMatrix<int> M1( 4UL, 6UL ); M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1 \endcode // \n \subsection matrix_operations_shrinkToFit .shrinkToFit() // // The internal capacity of matrices with dynamic memory is preserved in order to minimize the // number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead // to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal // capacity: \code blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved M1.shrinkToFit(); // Remove the unused capacity \endcode // Please note that due to padding the capacity might not be reduced exactly to \c rows() times // \c columns(). Please also note that in case a reallocation occurs, all iterators (including // \c end() iterators), all pointers and references to elements of this matrix are invalidated. // // // \subsection matrix_operations_reset_clear reset() / clear // // In order to reset all elements of a dense or sparse matrix, the \c reset() function can be // used. The number of rows and columns of the matrix are preserved: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. 
reset( M1 ); // Resetting all elements M1.rows(); // Returns 4: size and capacity remain unchanged \endcode // Alternatively, only a single row or column of the matrix can be resetted: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix \endcode // In order to reset a row of a column-major matrix or a column of a row-major matrix, use a // row or column view (see \ref views_rows and views_colums). // // In order to return a matrix to its default state (i.e. the state of a default constructed // matrix), the \c clear() function can be used: \code // Setting up a single precision row-major matrix, whose elements are initialized with 2.0F. blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F ); // Resetting all elements to 0.0F. clear( M1 ); // Resetting the entire matrix M1.rows(); // Returns 0: size is reset, but capacity remains unchanged \endcode // \n \subsection matrix_operations_matrix_transpose transpose() // // In addition to the non-modifying \c trans() function, matrices can be transposed in-place via // the \c transpose() function: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); transpose( M ); // In-place transpose operation. M = trans( M ); // Same as above \endcode // Note however that the transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. 
// // // \n \subsection matrix_operations_ctranspose ctranspose() // // The \c ctranspose() function can be used to perform an in-place conjugate transpose operation: \code blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL ); ctranspose( M ); // In-place conjugate transpose operation. M = ctrans( M ); // Same as above \endcode // Note however that the conjugate transpose operation fails if ... // // - ... the given matrix has a fixed size and is non-square; // - ... the given matrix is a triangular matrix; // - ... the given submatrix affects the restricted parts of a triangular matrix; // - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix. // // // \n \subsection matrix_operations_swap swap() // // Via the \c \c swap() function it is possible to completely swap the contents of two matrices // of the same type: \code blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL ); blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL ); swap( M1, M2 ); // Swapping the contents of M1 and M2 \endcode // \n \section matrix_operations_arithmetic_operations Arithmetic Operations // <hr> // // \subsection matrix_operations_min_max min() / max() // // The \c min() and \c max() functions can be used for a single vector or multiple vectors. If // passed a single matrix, the functions return the smallest and largest element of the given // dense matrix or the smallest and largest non-zero element of the given sparse matrix, // respectively: \code blaze::StaticMatrix<int,2UL,3UL> A{ { -5, 2, 7 }, { -4, 0, 1 } }; min( A ); // Returns -5 max( A ); // Returns 7 \endcode \code blaze::CompressedMatrix<int> B{ { 1, 0, 3 }, { 0, 0, 0 } }; min( B ); // Returns 1 max( B ); // Returns 3 \endcode // For more information on the unary \c min() and \c max() reduction operations see the // \ref matrix_operations_reduction_operations section. 
// // If passed two or more dense matrices, the \c min() and \c max() functions compute the // componentwise minimum or maximum of the given matrices, respectively: \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> C{ { -5, 1, -7 }, { 4, 1, 0 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> D{ { -5, 3, 0 }, { 2, 2, -2 } }; min( A, C ); // Results in the matrix ( -5, 1, -7 ) ( -4, 0, 0 ) max( A, C, D ); // Results in the matrix ( -5, 3, 7 ) ( 4, 2, 1 ) \endcode // Please note that sparse matrices can only be used in the unary \c min() and \c max() functions. // Also note that all forms of the \c min() and \c max() functions can be used to compute the // smallest and largest element of a matrix expression: \code min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix \endcode // \n \subsection matrix_operators_softmax softmax() // // The <a href="https://en.wikipedia.org/wiki/Softmax_function">softmax function</a>, also called // the normalized exponential function, of a given dense matrix can be computed via \c softmax(). // The resulting dense matrix consists of real values in the range (0..1], which add up to 1. \code blaze::StaticMatrix<double,3UL,3UL> A{ { 1.0, 2.0, 3.0 } , { 4.0, 1.0, 2.0 } , { 3.0, 4.0, 1.0 } }; blaze::StaticMatrix<double,3UL,3UL> B; // Evaluating the softmax function B = softmax( A ); // Results in ( 0.0157764 0.0428847 0.116573 ) // ( 0.316878 0.0157764 0.0428847 ) // ( 0.116573 0.316878 0.0157764 ) double s = sum( B ); // Results in 1 \endcode // \n \subsection matrix_operators_trace trace() // // The \c trace() function sums the diagonal elements of a square dense or sparse matrix: \code blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 } , { -4, -5, 6 } , { 7, -8, -9 } }; trace( A ); // Returns the sum of the diagonal elements, i.e. 
-15 \endcode // In case the given matrix is not a square matrix, a \c std::invalid_argument exception is // thrown. // // // \n \subsection matrix_operators_abs abs() // // The \c abs() function can be used to compute the absolute values of each element of a matrix. // For instance, the following computation \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 }, { 4, -5, 6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_sign sign() // // The \c sign() function can be used to evaluate the sign of each element of a matrix \a A. For // each element \c (i,j) the corresponding result is 1 if \a A(i,j) is greater than zero, 0 if // \a A(i,j) is zero, and -1 if \a A(i,j) is less than zero. For instance, the following use of // the \c sign() function \code blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, 0 }, { 4, 0, -6 } }; blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( sign( A ) ); \endcode // results in the matrix \f$ B = \left(\begin{array}{*{3}{c}} -1 & 1 & 0 \\ 1 & 0 & -1 \\ \end{array}\right)\f$ // \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round() // // The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up // each element of a matrix, respectively: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = floor( A ); // Rounding down each element of the matrix B = ceil ( A ); // Rounding up each element of the matrix B = trunc( A ); // Truncating each element of the matrix B = round( A ); // Rounding each element of the matrix \endcode // \n \subsection matrix_operators_conj conj() // // The \c conj() function can be applied on a dense or sparse matrix to compute the complex // conjugate of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) 
(-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Computing the matrix of conjugate values // ( (1, 0) (-2, 1) ) // ( (1,-1) ( 0,-1) ) StaticMatrix<cplx,2UL,2UL> B; B = conj( A ); \endcode // Additionally, matrices can be conjugated in-place via the \c conjugate() function: \code blaze::DynamicMatrix<cplx> C( 5UL, 2UL ); conjugate( C ); // In-place conjugate operation. C = conj( C ); // Same as above \endcode // \n \subsection matrix_operators_real real() // // The \c real() function can be used on a dense or sparse matrix to extract the real part of // each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the real part of each matrix element // ( 1 -2 ) // ( 1 0 ) StaticMatrix<double,2UL,2UL> B; B = real( A ); \endcode // \n \subsection matrix_operators_imag imag() // // The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part // of each element of the matrix: \code using blaze::StaticMatrix; using cplx = std::complex<double>; // Creating the matrix // ( (1,0) (-2,-1) ) // ( (1,1) ( 0, 1) ) StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) }, { cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } }; // Extracting the imaginary part of each matrix element // ( 0 -1 ) // ( 1 1 ) StaticMatrix<double,2UL,2UL> B; B = imag( A ); \endcode // \n \subsection matrix_operators_sqrt sqrt() / invsqrt() // // Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a // matrix can be computed: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; B = sqrt( A ); // Computes the square root of each element C = invsqrt( A ); // Computes the inverse square root of each element \endcode 
// Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_cbrt cbrt() / invcbrt() // // The \c cbrt() and \c invcbrt() functions can be used to compute the the (inverse) cubic root // of each element of a matrix: \code blaze::DynamicMatrix<double> A, B, C; B = cbrt( A ); // Computes the cubic root of each element C = invcbrt( A ); // Computes the inverse cubic root of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_hypot hypot() // // The \c hypot() function can be used to compute the componentwise hypotenous for a pair of // dense matrices: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = hypot( A, B ); // Computes the componentwise hypotenuous \endcode // \n \subsection matrix_operators_clamp clamp() // // The \c clamp() function can be used to restrict all elements of a matrix to a specific range: \code blaze::DynamicMatrix<double> A, B; B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1] \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_pow pow() // // The \c pow() function can be used to compute the exponential value of each element of a matrix. // If passed a matrix and a numeric exponent, the function computes the exponential value of each // element of the matrix using the same exponent. 
If passed a second matrix, the function computes // the componentwise exponential value: \code blaze::StaticMatrix<double,3UL,3UL> A, B, C; C = pow( A, 1.2 ); // Computes the exponential value of each element C = pow( A, B ); // Computes the componentwise exponential value \endcode // \n \subsection matrix_operators_exp exp() // // \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a // matrix, respectively: \code blaze::HybridMatrix<double,3UL,3UL> A, B; B = exp( A ); // Computes the base e exponential of each element B = exp2( A ); // Computes the base 2 exponential of each element B = exp10( A ); // Computes the base 10 exponential of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operators_log log() / log2() / log10() // // The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary // and common logarithm of each element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = log( A ); // Computes the natural logarithm of each element B = log2( A ); // Computes the binary logarithm of each element B = log10( A ); // Computes the common logarithm of each element \endcode // \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan() // // The following trigonometric functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sin( A ); // Computes the sine of each element of the matrix B = cos( A ); // Computes the cosine of each element of the matrix B = tan( A ); // Computes the tangent of each element of the matrix B = asin( A ); // Computes the inverse sine of each element of the matrix B = acos( A ); // Computes the inverse cosine of each element of the matrix B = atan( A ); // Computes the inverse tangent of each element of the matrix \endcode // Note that in case of sparse matrices only the 
non-zero elements are taken into account! // // // \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh() // // The following hyperbolic functions are available for both dense and sparse matrices: \code blaze::DynamicMatrix<double> A, B; B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix \endcode // \n \subsection matrix_operations_atan2 atan2() // // The multi-valued inverse tangent is available for a pair of dense matrices: \code blaze::DynamicMatrix<double> A, B, C; C = atan2( A, B ); // Computes the componentwise multi-valued inverse tangent \endcode // \n \subsection matrix_operators_erf erf() / erfc() // // The \c erf() and \c erfc() functions compute the (complementary) error function of each // element of a matrix: \code blaze::StaticMatrix<double,3UL,3UL> A, B; B = erf( A ); // Computes the error function of each element B = erfc( A ); // Computes the complementary error function of each element \endcode // Note that in case of sparse matrices only the non-zero elements are taken into account! // // // \n \subsection matrix_operations_map map() / forEach() // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on matrices. The unary \c map() function can be used to apply a custom operation // on each element of a dense or sparse matrix. 
For instance, the following example demonstrates // a custom square root computation via a lambda: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense matrices. The following example demonstrates the merging of two matrices of double // precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( (-2.1, 0.3) (-4.2, -1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex( r, i ); } ); \endcode // Although the computation can be parallelized it is not vectorized and thus cannot perform at // peak performance. However, it is also possible to create vectorized custom operations. See // \ref custom_operations for a detailed overview of the possibilities of custom operations. // // Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in // form of the \c forEach() function. With the introduction of binary custom functions, the // \c forEach() function has been renamed to \c map(). The \c forEach() function can still be // used (even for binary custom operations), but the function might be deprecated in future // releases of \b Blaze. // // // \n \section matrix_operations_reduction_operations Reduction Operations // <hr> // // \subsection matrix_operations_reduction_operations_reduce reduce() // // The \c reduce() function performs either a total reduction, a rowwise reduction or a columnwise // reduction of the elements of the given dense matrix or the non-zero elements of the given sparse // matrix. 
The following examples demonstrate the total reduction of a dense and sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode \code blaze::CompressedMatrix<double> A; // ... Resizing and initialization const double totalsum1 = reduce( A, blaze::Add() ); const double totalsum2 = reduce( A, []( double a, double b ){ return a + b; } ); \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c reduce() function performs a // column-wise or row-wise reduction, respectively. In case \c blaze::columnwise is specified, the // (non-zero) elements of the matrix are reduced column-wise and the result is a row vector. In // case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are reduced row-wise // and the result is a column vector: \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,rowVector> colsum1, colsum2; // ... Resizing and initialization colsum1 = reduce<columnwise>( A, blaze::Add() ); colsum2 = reduce<columnwise>( B, []( double a, double b ){ return a + b; } ); \endcode \code blaze::DynamicMatrix<double> A; blaze::CompressedMatrix<double> B; blaze::DynamicVector<double,columnVector> rowsum1, rowsum2; // ... Resizing and initialization rowsum1 = reduce<rowwise>( A, blaze::Add() ); rowsum2 = reduce<rowwise>( B, []( double a, double b ){ return a + b; } ); \endcode // As demonstrated in the examples it is possible to pass any binary callable as custom reduction // operation. However, for instance in the case of lambdas the vectorization of the reduction // operation is compiler dependent and might not perform at peak performance. However, it is also // possible to create vectorized custom operations. See \ref custom_operations for a detailed // overview of the possibilities of custom operations. 
// // Please note that the evaluation order of the \c reduce() function is unspecified. Thus the // behavior is non-deterministic if the given reduction operation is not associative or not // commutative. Also, the operation is undefined if the given reduction operation modifies the // values. // // \n \subsection matrix_operations_reduction_operations_sum sum() // // The \c sum() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of addition: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode \code blaze::CompressedMatrix<int> a{ { 1, 2 }, { 3, 4 } }; const int totalsum = sum( A ); // Results in 10 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c sum() function performs a // column-wise or row-wise summation, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are summed up column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are summed up // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colsum1, colsum2; colsum1 = sum<columnwise>( A ); // Results in ( 2, 3, 6 ) colsum2 = sum<columnwise>( B ); // Same result \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowsum1, rowsum2; rowsum1 = sum<rowwise>( A ); // Results in ( 3, 8 ) rowsum2 = sum<rowwise>( B ); // Same result \endcode // Please note that the evaluation order of the \c sum() function is unspecified. 
// // \n \subsection matrix_operations_reduction_operations_prod prod() // // The \c prod() function reduces the elements of the given dense vector or the non-zero elements // of the given sparse vector by means of multiplication: \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalprod = prod( A ); // Results in 24 \endcode // By specifying \c blaze::columnwise or \c blaze::rowwise the \c prod() function performs a // column-wise or row-wise multiplication, respectively. In case \c blaze::columnwise is specified, // the (non-zero) elements of the matrix are multiplied column-wise and the result is a row vector. // In case \c blaze::rowwise is specified, the (non-zero) elements of the matrix are multiplied // row-wise and the result is a column vector: \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colprod1, colprod2; colprod1 = prod<columnwise>( A ); // Results in ( 1, 0, 8 ) colprod2 = prod<columnwise>( A ); // Results in ( 1, 3, 8 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowprod1, rowprod2; rowprod1 = prod<rowwise>( A ); // Results in ( 0, 12 ) rowprod2 = prod<rowwise>( A ); // Results in ( 2, 12 ) \endcode // Please note that the evaluation order of the \c prod() function is unspecified. // // \n \subsection matrix_operations_reduction_operations_min min() // // The unary \c min() function returns the smallest element of the given dense matrix or the // smallest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. 
In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmin = min( A ); // Results in 1 \endcode \code blaze::CompressedMatrix<int> A{ { 1, 0 }, { 3, 0 } }; const int totalmin = min( A ); // Results in 1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the minimum of this matrix is 1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c min() function determines the // smallest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the smallest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the smallest (non-zero) element of each // row is determined and the result is a column vector. \code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,rowVector> colmin1, colmin2; colmin1 = min<columnwise>( A ); // Results in ( 1, 0, 2 ) colmin2 = min<columnwise>( B ); // Results in ( 1, 3, 2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::DynamicVector<int,columnVector> rowmin1, rowmin2; rowmin1 = min<rowwise>( A ); // Results in ( 0, 1 ) rowmin2 = min<rowwise>( B ); // Results in ( 1, 1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. 
// // \n \subsection matrix_operations_reduction_operations_max max() // // The unary \c max() function returns the largest element of the given dense matrix or the // largest non-zero element of the given sparse matrix. This function can only be used for // element types that support the smaller-than relationship. In case the given matrix currently // has either 0 rows or 0 columns, the returned value is the default value (e.g. 0 in case of // fundamental data types). \code blaze::DynamicMatrix<int> A{ { 1, 2 }, { 3, 4 } }; const int totalmax = max( A ); // Results in 4 \endcode \code blaze::CompressedMatrix<int> A{ { -1, 0 }, { -3, 0 } }; const int totalmax = max( A ); // Results in -1 \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. In the previous example the compressed matrix has only 2 non-zero elements. // However, the maximum of this matrix is -1. // // By specifying \c blaze::columnwise or \c blaze::rowwise the \c max() function determines the // largest (non-zero) element in each row or column, respectively. In case \c blaze::columnwise // is specified, the largest (non-zero) element of each column is determined and the result is // a row vector. In case \c blaze::rowwise is specified, the largest (non-zero) element of each // row is determined and the result is a column vector. 
\code using blaze::columnwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,rowVector> colmax1, colmax2; colmax1 = max<columnwise>( A ); // Results in ( 1, 3, 4 ) colmax2 = max<columnwise>( B ); // Results in ( -1, -3, -2 ) \endcode \code using blaze::rowwise; blaze::DynamicMatrix<int> A{ { 1, 0, 2 }, { 1, 3, 4 } }; blaze::CompressedMatrix<int> B{ { -1, 0, -2 }, { -1, -3, -4 } }; blaze::DynamicVector<int,columnVector> rowmax1, rowmax2; rowmax1 = max<rowwise>( A ); // Results in ( 2, 4 ) rowmax2 = max<rowwise>( B ); // Results in ( -1, -1 ) \endcode // \note In case the sparse matrix is not completely filled, the implicit zero elements are NOT // taken into account. // // // \n \section matrix_operations_norms Norms // <hr> // // \subsection matrix_operations_norms_norm norm() // // The \c norm() function computes the L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l2 = norm( A ); \endcode // \n \subsection matrix_operations_norms_sqrnorm sqrNorm() // // The \c sqrNorm() function computes the squared L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l2 = sqrNorm( A ); \endcode // \n \subsection matrix_operations_norms_l1norm l1Norm() // // The \c l1Norm() function computes the squared L1 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l1 = l1Norm( A ); \endcode // \n \subsection matrix_operations_norms_l2norm l2Norm() // // The \c l2Norm() function computes the squared L2 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... 
Resizing and initialization const double l2 = l2Norm( A ); \endcode // \n \subsection matrix_operations_norms_l3norm l3Norm() // // The \c l3Norm() function computes the squared L3 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l3 = l3Norm( A ); \endcode // \n \subsection matrix_operations_norms_l4norm l4Norm() // // The \c l4Norm() function computes the squared L4 norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double l4 = l4Norm( A ); \endcode // \n \subsection matrix_operations_norms_lpnorm lpNorm() // // The \c lpNorm() function computes the general Lp norm of the given dense or sparse matrix, // where the norm is specified by either a compile time or a runtime argument: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double lp1 = lpNorm<2>( A ); // Compile time argument const double lp2 = lpNorm( A, 2.3 ); // Runtime argument \endcode // \n \subsection matrix_operations_norms_maxnorm maxNorm() // // The \c maxNorm() function computes the maximum norm of the given dense or sparse matrix: \code blaze::DynamicMatrix<double> A; // ... Resizing and initialization const double max = maxNorm( A ); \endcode // \n \section matrix_operations_declaration_operations Declaration Operations // <hr> // // \subsection matrix_operations_declsym declsym() // // The \c declsym() operation can be used to explicitly declare any matrix or matrix expression // as symmetric: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = declsym( A ); \endcode // Any matrix or matrix expression that has been declared as symmetric via \c declsym() will // gain all the benefits of a symmetric matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; DynamicMatrix<double> A, B, C; SymmetricMatrix< DynamicMatrix<double> > S; // ... Resizing and initialization isSymmetric( declsym( A ) ); // Will always return true without runtime effort S = declsym( A ); // Omit any runtime check for symmetry C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declsym() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-symmetric matrix or // matrix expression as symmetric via the \c declsym() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declherm declherm() // // The \c declherm() operation can be used to explicitly declare any matrix or matrix expression // as Hermitian: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declherm( A ); \endcode // Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will // gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; DynamicMatrix<double> A, B, C; HermitianMatrix< DynamicMatrix<double> > S; // ... 
Resizing and initialization isHermitian( declherm( A ) ); // Will always return true without runtime effort S = declherm( A ); // Omit any runtime check for Hermitian symmetry C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declherm() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-Hermitian matrix or // matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior // (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decllow decllow() // // The \c decllow() operation can be used to explicitly declare any matrix or matrix expression // as lower triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = decllow( A ); \endcode // Any matrix or matrix expression that has been declared as lower triangular via \c decllow() // will gain all the benefits of a lower triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; DynamicMatrix<double> A, B, C; LowerMatrix< DynamicMatrix<double> > L; // ... Resizing and initialization isLower( decllow( A ) ); // Will always return true without runtime effort L = decllow( A ); // Omit any runtime check for A being a lower matrix C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decllow() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. 
Declaring a non-lower matrix or // matrix expression as lower triangular via the \c decllow() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declupp declupp() // // The \c declupp() operation can be used to explicitly declare any matrix or matrix expression // as upper triangular: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declupp( A ); \endcode // Any matrix or matrix expression that has been declared as upper triangular via \c declupp() // will gain all the benefits of a upper triangular matrix, which range from reduced runtime // checking to a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::UpperMatrix; DynamicMatrix<double> A, B, C; UpperMatrix< DynamicMatrix<double> > U; // ... Resizing and initialization isUpper( declupp( A ) ); // Will always return true without runtime effort U = declupp( A ); // Omit any runtime check for A being a upper matrix C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c declupp() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-upper matrix or // matrix expression as upper triangular via the \c declupp() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_decldiag decldiag() // // The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression // as diagonal: \code blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization B = decldiag( A ); \endcode // Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will // gain all the benefits of a diagonal matrix, which range from reduced runtime checking to // a considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... Resizing and initialization isDiagonal( decldiag( A ) ); // Will always return true without runtime effort D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal, // i.e. perform an optimized matrix multiplication \endcode // \warning The \c decldiag() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-diagonal matrix // or matrix expression as diagonal via the \c decldiag() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \subsection matrix_operations_declid declid() // // The \c declid() operation can be used to explicitly declare any matrix or matrix expression // as identity matrix: \code blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization B = declid( A ); \endcode // Any matrix or matrix expression that has been declared as identity matrix via \c declid() will // gain all the benefits of an identity matrix, which range from reduced runtime checking to a // considerable speed-up in computations: \code using blaze::DynamicMatrix; using blaze::DiagonalMatrix; DynamicMatrix<double> A, B, C; DiagonalMatrix< DynamicMatrix<double> > D; // ... 
Resizing and initialization isIdentity( declid( A ) ); // Will always return true without runtime effort D = declid( A ); // Omit any runtime check for A being a diagonal matrix C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an // identity matrix, i.e. perform an optimized matrix multiplication \endcode // \warning The \c declid() operation has the semantics of a cast: The caller is completely // responsible and the system trusts the given information. Declaring a non-identity matrix // or matrix expression as identity matrix via the \c declid() operation leads to undefined // behavior (which can be violated invariants or wrong computation results)! // // // \n \section matrix_operations_matrix_inversion Matrix Inversion // <hr> // // The inverse of a square dense matrix can be computed via the \c inv() function: \code blaze::DynamicMatrix<float,blaze::rowMajor> A, B; // ... Resizing and initialization B = inv( A ); // Compute the inverse of A \endcode // Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert() // function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization invert( A ); // In-place matrix inversion \endcode // Both the \c inv() and the \c invert() functions will automatically select the most suited matrix // inversion algorithm depending on the size and type of the given matrix. For small matrices of // up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices // larger than 6x6 the inversion is performed by means of the most suited matrix decomposition // method: In case of a general matrix the LU decomposition is used, for symmetric matrices the // LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and // for triangular matrices the inverse is computed via a forward or back substitution. 
// // In case the type of the matrix does not provide additional compile time information about its // structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually // when calling the \c invert() function: \code using blaze::asGeneral; using blaze::asSymmetric; using blaze::asHermitian; using blaze::asLower; using blaze::asUniLower; using blaze::asUpper; using blaze::asUniUpper; using blaze::asDiagonal; invert<asGeneral> ( A ); // In-place inversion of a general matrix invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix invert<asLower> ( A ); // In-place inversion of a lower triangular matrix invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix invert<asUpper> ( A ); // In-place inversion of an upper triangular matrix invert<asUniUpper> ( A ); // In-place inversion of an upper unitriangular matrix invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix \endcode // Alternatively, via the \c invert() function it is possible to explicitly specify the inversion // algorithm: \code using blaze::byLU; using blaze::byLDLT; using blaze::byLDLH; using blaze::byLLH; // In-place inversion of a general matrix by means of an LU decomposition invert<byLU>( A ); // In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLT>( A ); // In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition invert<byLDLH>( A ); // In-place inversion of a positive definite matrix by means of a Cholesky decomposition invert<byLLH>( A ); \endcode // Whereas the inversion by means of an LU decomposition works for every general square matrix, // the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is // restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works // for Hermitian positive definite 
matrices. Please note that it is the responsibility of the // function caller to guarantee that the selected algorithm is suited for the given matrix. In // case this precondition is violated the result can be wrong and might not represent the inverse // of the given matrix! // // For both the \c inv() and \c invert() function the matrix inversion fails if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // In all failure cases either a compilation error is created if the failure can be predicted at // compile time or a \c std::invalid_argument exception is thrown. // // \note The matrix inversion can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \note It is not possible to use any kind of view on the expression object returned by the // \c inv() function. Also, it is not possible to access individual elements via the function call // operator on the expression object: \code row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression! inv( A )(1,2); // Compilation error: It is not possible to access individual elements! \endcode // \note The inversion functions do not provide any exception safety guarantee, i.e. in case an // exception is thrown the matrix may already have been modified. // // // \n \section matrix_operations_decomposition Matrix Decomposition // <hr> // // \note All decomposition functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. 
The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can // only be used if a fitting LAPACK library is available and linked to the executable. Otherwise // a linker error will be created. // // \subsection matrix_operations_decomposition_lu LU Decomposition // // The LU decomposition of a dense matrix can be computed via the \c lu() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a row-major matrix assert( A == L * U * P ); \endcode \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P; lu( A, L, U, P ); // LU decomposition of a column-major matrix assert( A == P * L * U ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the // three matrices \c A, \c L and \c U are required to have the same storage order. Also, please // note that the way the permutation matrix \c P needs to be applied differs between row-major and // column-major matrices, since the algorithm uses column interchanges for row-major matrices and // row interchanges for column-major matrices. // // Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates // the LU decomposition of a symmetric matrix into a lower and upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U; blaze::DynamicMatrix<double,blaze::columnMajor> P; lu( A, L, U, P ); // LU decomposition of A \endcode // \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition // // The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; llh( A, L ); // LLH decomposition of a row-major matrix assert( A == L * ctrans( L ) ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A // and \c L can have any storage order. // // Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates // the LLH decomposition of a symmetric matrix into a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; llh( A, L ); // Cholesky decomposition of A \endcode // \n \subsection matrix_operations_decomposition_qr QR Decomposition // // The QR decomposition of a dense matrix can be computed via the \c qr() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::columnMajor> Q; blaze::DynamicMatrix<double,blaze::rowMajor> R; qr( A, Q, R ); // QR decomposition of a row-major matrix assert( A == Q * R ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c R can have any storage order. // // Furthermore, \c qr() can be used with adaptors. 
For instance, the following example demonstrates // the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R; qr( A, Q, R ); // QR decomposition of A \endcode // \n \subsection matrix_operations_decomposition_rq RQ Decomposition // // Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via // the \c rq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> R; blaze::DynamicMatrix<double,blaze::columnMajor> Q; rq( A, R, Q ); // RQ decomposition of a row-major matrix assert( A == R * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c R and \c Q can have any storage order. // // Also the \c rq() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the RQ decomposition of an Hermitian matrix into a general // matrix and an upper triangular matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... Resizing and initialization blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; rq( A, R, Q ); // RQ decomposition of A \endcode // \n \subsection matrix_operations_decomposition_ql QL Decomposition // // The QL decomposition of a dense matrix can be computed via the \c ql() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::DynamicMatrix<double,blaze::columnMajor> L; ql( A, Q, L ); // QL decomposition of a row-major matrix assert( A == Q * L ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c Q and \c L can have any storage order. // // Also the \c ql() function can be used in combination with matrix adaptors. For instance, the // following example demonstrates the QL decomposition of a symmetric matrix into a general // matrix and a lower triangular matrix: \code blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> Q; blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L; ql( A, Q, L ); // QL decomposition of A \endcode // \n \subsection matrix_operations_decomposition_lq LQ Decomposition // // The LQ decomposition of a dense matrix can be computed via the \c lq() function: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> L; blaze::DynamicMatrix<double,blaze::columnMajor> Q; lq( A, L, Q ); // LQ decomposition of a row-major matrix assert( A == L * Q ); \endcode // The function works for both \c rowMajor and \c columnMajor matrices and the three matrices // \c A, \c L and \c Q can have any storage order. // // Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates // the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix: \code blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A; // ... 
Resizing and initialization blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L; blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q; lq( A, L, Q ); // LQ decomposition of A \endcode // \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors // <hr> // // The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void eigen( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& V ); } // namespace blaze \endcode // The first function computes only the eigenvalues of the given \a n-by-\a n matrix, the second // function additionally computes the eigenvectors. The eigenvalues are returned in the given vector // \a w and the eigenvectors are returned in the given matrix \a V, which are both resized to the // correct dimensions (if possible and necessary). // // Depending on the given matrix type, the resulting eigenvalues are either of floating point // or complex type: In case the given matrix is either a compile time symmetric matrix with // floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues // will be of floating point type and therefore the elements of the given eigenvalue vector are // expected to be of floating point type. In all other cases they are expected to be of complex // type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except // that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having // the positive imaginary part first. 
// // In case \a A is a row-major matrix, the left eigenvectors are returned in the rows of \a V, // in case \a A is a column-major matrix, the right eigenvectors are returned in the columns of // \a V. In case the given matrix is a compile time symmetric matrix with floating point elements, // the resulting eigenvectors will be of floating point type and therefore the elements of the // given eigenvector matrix are expected to be of floating point type. In all other cases they // are expected to be of complex type. // // The following examples give an impression of the computation of eigenvalues and eigenvectors // for a general, a symmetric, and an Hermitian matrix: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A // ... Initialization DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The symmetric matrix A // ... Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // The Hermitian matrix A // ... 
Initialization DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors eigen( A, w, V ); \endcode // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // In all failure cases an exception is thrown. // // \note All \c eigen() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of // LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available // and linked to the executable. Otherwise a linker error will be created. 
// // // \n \section matrix_operations_singularvalues Singular Values/Singular Vectors // <hr> // // The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd() // functions: \code namespace blaze { template< typename MT, bool SO, typename VT, bool TF > void svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3 > void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3, typename ST > size_t svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The first and third function compute only singular values of the given general \a m-by-\a n // matrix, the second and fourth function additionally compute singular vectors. The resulting // singular values are returned in the given vector \a s, the left singular vectors are returned // in the given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s, // \a U, and \a V are resized to the correct dimensions (if possible and necessary). // // The third and fourth function allow for the specification of a subset of singular values and/or // vectors. The number of singular values and vectors to be computed is specified by the lower // bound \a low and the upper bound \a upp, which either form an integral or a floating point // range. // // In case \a low and \a upp form are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. 
The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. // // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // In all failure cases an exception is thrown. // // Examples: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... 
Initialization DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V ); \endcode \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A // ... Initialization DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors DynamicVector<double,columnVector> s; // The vector for the singular values DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors svd( A, U, s, V, 0, 2 ); \endcode // \note All \c svd() functions can only be used for dense matrices with \c float, \c double, // \c complex<float> or \c complex<double> element type. The attempt to call the function with // matrices of any other element type or with a sparse matrix results in a compile time error! // // \note The functions compute the singular values and/or singular vectors of a dense matrix by // means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is // available and linked to the executable. Otherwise a linker error will be created. // // // \n Previous: \ref matrix_types &nbsp; &nbsp; Next: \ref adaptors */ //************************************************************************************************* //**Adaptors*************************************************************************************** /*!\page adaptors Adaptors // // \tableofcontents // // // \section adaptors_general General Concepts // <hr> // // Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the // matrices such that certain invariants are preserved. 
Due to this adaptors can provide a compile // time guarantee of certain properties, which can be exploited for optimized performance. // // The \b Blaze library provides a total of 9 different adaptors: // // <ul> // <li> \ref adaptors_symmetric_matrices </li> // <li> \ref adaptors_hermitian_matrices </li> // <li> \ref adaptors_triangular_matrices // <ul> // <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_lowermatrix </li> // <li> \ref adaptors_triangular_matrices_unilowermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_uppermatrix </li> // <li> \ref adaptors_triangular_matrices_uniuppermatrix </li> // <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li> // </ul> // </li> // <li> \ref adaptors_triangular_matrices "Diagonal Matrices" // <ul> // <li> \ref adaptors_triangular_matrices_diagonalmatrix </li> // </ul> // </li> // </ul> // </li> // </ul> // // In combination with the general matrix types, \b Blaze provides a total of 40 different matrix // types that make it possible to exactly adapt the type of matrix to every specific problem. // // // \n \section adaptors_examples Examples // <hr> // // The following code examples give an impression on the use of adaptors. The first example shows // the multiplication between two lower matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... 
Resizing and initialization C = A * B; \endcode // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant // performance advantage in comparison to a general matrix multiplication, especially for large // matrices. // // The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse // vector multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which significantly increases the performance. 
// // \n Previous: \ref matrix_operations &nbsp; &nbsp; Next: \ref adaptors_symmetric_matrices */ //************************************************************************************************* //**Symmetric Matrices***************************************************************************** /*!\page adaptors_symmetric_matrices Symmetric Matrices // // \tableofcontents // // // \n \section adaptors_symmetric_matrices_general Symmetric Matrices // <hr> // // In contrast to general matrices, which have no restriction in their number of rows and columns // and whose elements can have any value, symmetric matrices provide the compile time guarantee // to be square matrices with pair-wise identical values. Mathematically, this means that a // symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal // values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can // be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze // library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix // class template. // // // \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix // <hr> // // The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it // by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its // transpose \f$ A = A^T \f$). It can be included via the header file \code #include <blaze/math/SymmetricMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class SymmetricMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. 
Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible symmetric matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense symmetric matrix with static memory blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense symmetric matrix based on HybridMatrix blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision symmetric matrix blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E; \endcode // The storage order of a symmetric matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix // will also be a column-major matrix. // // // \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices // <hr> // // A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. 
However, there are // some important exceptions resulting from the symmetry constraint: // // -# <b>\ref adaptors_symmetric_matrices_square</b> // -# <b>\ref adaptors_symmetric_matrices_symmetry</b> // -# <b>\ref adaptors_symmetric_matrices_initialization</b> // // \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 symmetric static matrix SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced! // // This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its // counterpart element \f$ a_{ji} \f$. 
Also, it is only possible to assign matrices that are // symmetric themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Default constructed, row-major 3x3 symmetric compressed matrix SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); // Initializing three elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1) A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator *A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a symmetric dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-symmetric dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 }, { 8.0, 0.0, -1.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; symmetric invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a symmetric sparse matrix. 
In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; // Setup of the symmetric matrix // // ( 0 1 3 ) // A = ( 1 2 0 ) // ( 3 0 0 ) // SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0) A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1) A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2) \endcode // The symmetry property is also enforced for symmetric custom matrices: In case the given array // of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is // thrown: \code using blaze::CustomMatrix; using blaze::SymmetricMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomSymmetric = SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 symmetric custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomSymmetric A( array, 3UL ); // OK // Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomSymmetric B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the // symmetric matrix.
The following example demonstrates that modifying the elements of an entire // row of the symmetric matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of the symmetric matrix // // ( 0 1 0 2 ) // A = ( 1 3 4 0 ) // ( 0 4 0 5 ) // ( 2 0 5 0 ) // SymmetricMatrix< DynamicMatrix<int> > A( 4 ); A(0,1) = 1; A(0,3) = 2; A(1,1) = 3; A(1,2) = 4; A(2,3) = 5; // Setting all elements in the 1st row to 0 results in the matrix // // ( 0 0 0 2 ) // A = ( 0 0 0 0 ) // ( 0 0 0 5 ) // ( 2 0 5 0 ) // row( A, 1 ) = 0; \endcode // The next example demonstrates the (compound) assignment to submatrices of symmetric matrices. // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized! 
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the symmetric property of // dense symmetric matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major symmetric dynamic matrix SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A SymmetricMatrix matrix can participate in numerical operations in any way any other dense // or sparse matrix can participate. It can also be combined with any other dense or sparse vector // or matrix. The following code example gives an impression of the use of SymmetricMatrix within // arithmetic operations: \code using blaze::SymmetricMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime 
check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix // to be assigned is not symmetric at compile time, a runtime check is performed. // // // \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices // <hr> // // It is also possible to use symmetric block matrices: \code using blaze::CompressedMatrix; using blaze::StaticMatrix; using blaze::SymmetricMatrix; // Definition of a 3x3 symmetric block matrix based on CompressedMatrix SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 ); \endcode // Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and // guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also // applied to element \f$ a_{ji} \f$: \code // Inserting the elements (2,4) and (4,2) A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } } ); // Manipulating the elements (2,4) and (4,2) A(2,4)(1,1) = -5; \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_symmetric_matrices_performance Performance Considerations // <hr> // // When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor // instead of a general matrix can be a considerable performance advantage. The \b Blaze library // tries to exploit the properties of symmetric matrices whenever possible. However, there are // also situations when using a symmetric matrix introduces some overhead. The following examples // demonstrate several situations where symmetric matrices can positively or negatively impact // performance.
// // \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::rowMajor; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; SymmetricMatrix< CompressedMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the // SymmetricMatrix adapter is obviously an advantage. // // \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::rowMajor; using blaze::columnVector; SymmetricMatrix< DynamicMatrix<double,rowMajor> > A; CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. 
Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. // // \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; using blaze::columnMajor; SymmetricMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a symmetric matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not symmetric at compile time: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; SymmetricMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the symmetric matrix; no performance penalty C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead \endcode // When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary // to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property // of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two symmetric matrices does not necessarily result in another symmetric matrix: \code SymmetricMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a symmetric matrix; no runtime overhead C = A - B; // Results in a symmetric matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead \endcode // \n Previous: \ref adaptors &nbsp; &nbsp; Next: \ref adaptors_hermitian_matrices */ //************************************************************************************************* //**Hermitian Matrices***************************************************************************** /*!\page adaptors_hermitian_matrices Hermitian Matrices // // \tableofcontents // // // \n \section adaptors_hermitian_matrices_general Hermitian Matrices // <hr> // // In addition to symmetric matrices, \b 
Blaze also provides an adaptor for Hermitian matrices. // Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise // conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal // to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have // a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze // library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix // class template. // // // \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix // <hr> // // The HermitianMatrix class template is an adapter for existing dense and sparse matrix types. // It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to // its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file \code #include <blaze/math/HermitianMatrix.h> \endcode // The type of the adapted matrix can be specified via template parameter: \code template< typename MT > class HermitianMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible Hermitian matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense Hermitian matrix with static memory blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C; // Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision complex Hermitian matrix blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E; \endcode // The storage order of a Hermitian matrix depends on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as // blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix // will also be a column-major matrix. // // // \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices // // The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits. // However, there are a couple of differences, both from a mathematical point of view as well as // from an implementation point of view.
// // From a mathematical point of view, a matrix is called symmetric when it is equal to its // transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate // transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two // conditions coincide, which means that symmetric matrices of real values are also Hermitian // and Hermitian matrices of real values are also symmetric. // // From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data // types (i.e. all integral types except \c bool, floating point and complex types), whereas // symmetric matrices can also be block matrices (i.e. can have vector or matrix elements). // For built-in element types, the HermitianMatrix adaptor behaves exactly like the according // SymmetricMatrix implementation. For complex element types, however, the Hermitian property // is enforced (see also \ref adaptors_hermitian_matrices_hermitian). \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::HermitianMatrix; using blaze::SymmetricMatrix; // The following two matrices provide an identical experience (including performance) HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric // The following two matrices will behave differently HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric // Hermitian block matrices are not allowed HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error! SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix \endcode // \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices // <hr> // // A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. 
// It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the Hermitian symmetry constraint: // // -# <b>\ref adaptors_hermitian_matrices_square</b> // -# <b>\ref adaptors_hermitian_matrices_hermitian</b> // -# <b>\ref adaptors_hermitian_matrices_initialization</b> // // \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square! // // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 Hermitian static matrix HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced! 
// // This means that the following properties of a Hermitian matrix are always guaranteed: // // - The diagonal elements are real numbers, i.e. the imaginary part is zero // - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$ // // Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its // counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that // are Hermitian themselves: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Default constructed, row-major 3x3 Hermitian compressed matrix HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); // Initializing the matrix via the function call operator // // ( (1, 0) (0,0) (2,1) ) // ( (0, 0) (0,0) (0,0) ) // ( (2,-1) (0,0) (0,0) ) // A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0) A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0) // Inserting three more elements via the insert() function // // ( (1, 0) (0,0) (2, 1) ) // ( (0, 0) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1) A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1) // Access via a non-const iterator // // ( (1, 0) (8,1) (2, 1) ) // ( (8,-1) (2,0) (4,-2) ) // ( (2,-1) (4,2) (0, 0) ) // *A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1) // Erasing elements via the erase() function // // ( (0, 0) (8,1) (0, 0) ) // ( (8,-1) (2,0) (4,-2) ) // ( (0, 0) (4,2) (0, 0) ) // A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0) // Construction from a Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) }, { cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 )
}, { cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK // Assignment of a non-Hermitian dense matrix StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) }, { cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) }, { cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } }; C = D; // Throws an exception; Hermitian invariant would be violated! \endcode // The same restriction also applies to the \c append() function for sparse matrices: Appending // the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix. // Despite the additional insertion, the \c append() function still provides the most efficient // way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the // capacity of the individual rows/columns of the matrix should be specifically prepared with // \c reserve() calls: \code using blaze::CompressedMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,2) (3,-4) ) // A = ( (1,-2) (2,0) (0, 0) ) // ( (3, 4) (0,0) (0, 0) ) // HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 ); A.reserve( 5 ); // Reserving enough space for 5 non-zero elements A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0) A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1) A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2) \endcode // The Hermitian property is also enforced for Hermitian custom matrices: In case the given array // of elements does not represent a Hermitian matrix, a \c std::invalid_argument
exception is // thrown: \code using blaze::CustomMatrix; using blaze::HermitianMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomHermitian = HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 Hermitian custom matrix from a properly initialized array double array[9] = { 1.0, 2.0, 4.0, 2.0, 3.0, 5.0, 4.0, 5.0, 6.0 }; CustomHermitian A( array, 3UL ); // OK // Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomHermitian B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the // Hermitian matrix. The following example demonstrates that modifying the elements of an entire // row of the Hermitian matrix also affects the counterpart elements in the according column of // the matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of the Hermitian matrix // // ( (0, 0) (1,-1) (0,0) (2, 1) ) // A = ( (1, 1) (3, 0) (4,2) (0, 0) ) // ( (0, 0) (4,-2) (0,0) (5,-3) ) // ( (2,-1) (0, 0) (5,3) (0, 0) ) // HermitianMatrix< DynamicMatrix<cplx> > A( 4 ); A(0,1) = cplx( 1.0, -1.0 ); A(0,3) = cplx( 2.0, 1.0 ); A(1,1) = cplx( 3.0, 0.0 ); A(1,2) = cplx( 4.0, 2.0 ); A(2,3) = cplx( 5.0, 3.0 ); // Setting all elements in the 1st row to 0 results in the matrix // // ( (0, 0) (0,0) (0,0) (2, 1) ) // A = ( (0, 0) (0,0) (0,0) (0, 0) ) // ( (0, 0) (0,0) (0,0) (5,-3) ) // ( (2,-1) (0,0) (5,3) (0, 0) ) // row( A, 1 ) = cplx( 0.0, 0.0 ); \endcode // The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices. // Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian // symmetry of the matrix is preserved.
Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using cplx = std::complex<double>; // Setup of two default 4x4 Hermitian matrices HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( (1,-1) (2, 5) ) // B = ( (3, 0) (4,-6) ) // ( (5, 0) (6, 0) ) // DynamicMatrix<cplx> B( 3UL, 2UL ); B(0,0) = cplx( 1.0, -1.0 ); B(0,1) = cplx( 2.0, 5.0 ); B(1,0) = cplx( 3.0, 0.0 ); B(1,1) = cplx( 4.0, -6.0 ); B(2,0) = cplx( 5.0, 0.0 ); B(2,1) = cplx( 6.0, 0.0 ); // OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved // // ( (0, 0) (0, 0) (1,-1) (2, 5) ) // A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) ) // ( (1, 1) (3, 0) (5, 0) (6, 0) ) // ( (2,-5) (4, 6) (6, 0) (0, 0) ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved! // The elements marked with X cannot be assigned unambiguously! // // ( (0, 0) (1,-1) (2,5) (0,0) ) // A2 = ( (1, 1) (3, 0) (X,X) (0,0) ) // ( (2,-5) (X, X) (6,0) (0,0) ) // ( (0, 0) (0, 0) (0,0) (0,0) ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
// // Although this results in a small loss of efficiency (especially in case all default values are // overridden afterwards), this property is important since otherwise the Hermitian property of // dense Hermitian matrices could not be guaranteed: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // Default initialized, 5x5 row-major Hermitian dynamic matrix HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); \endcode // \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A HermitianMatrix can be used within all numerical operations in any way any other dense or // sparse matrix can be used. It can also be combined with any other dense or sparse vector or // matrix. The following code example gives an impression of the use of HermitianMatrix within // arithmetic operations: \code using blaze::HermitianMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; using cplx = complex<float>; DynamicMatrix<cplx,rowMajor> A( 3, 3 ); CompressedMatrix<cplx,rowMajor> B( 3, 3 ); HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 ); HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 ); HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E; HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes 
runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a Hermitian matrix. In case the matrix // to be assigned is not Hermitian at compile time, a runtime check is performed. // // // \n \section adaptors_hermitian_matrices_performance Performance Considerations // <hr> // // When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor // instead of a general matrix can be a considerable performance advantage. This is particularly // true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The // \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever // possible. However, there are also situations when using a Hermitian matrix introduces some // overhead. The following examples demonstrate several situations where Hermitian matrices can // positively or negatively impact performance. // // \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact // that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the // multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix // multiplication: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric DynamicMatrix<double,columnMajor> C; // ...
Resizing and initialization C = A * B; \endcode // Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited // for maximum performance. However, \b Blaze evaluates the multiplication as \code C = A * trans( B ); \endcode // which significantly increases the performance since in contrast to the original formulation the // optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a // symmetric matrix is obviously an advantage. // // \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar optimization is possible in case of matrix/vector multiplications: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::CompressedVector; using blaze::HermitianMatrix; using blaze::rowMajor; using blaze::columnVector; HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric CompressedVector<double,columnVector> x; DynamicVector<double,columnVector> y; // ... Resizing and initialization y = A * x; \endcode // In this example it is not intuitively apparent that using a row-major matrix is not the best // possible choice in terms of performance since the computation cannot be vectorized. Choosing // a column-major matrix instead, however, would enable a vectorized computation. Therefore // \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and // evaluates the multiplication as \code y = trans( A ) * x; \endcode // which also significantly increases the performance. 
// // \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices // // Another example is the optimization of a row view on a column-major symmetric matrix: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; using blaze::columnMajor; HermitianMatrix< DynamicMatrix<double,columnMajor> > A( 10UL ); // Both Hermitian and symmetric auto row5 = row( A, 5UL ); \endcode // Usually, a row view on a column-major matrix results in a considerable performance decrease in // comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix // elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of // the matrix, which provides the same performance as if the matrix would be row-major. Note that // this also works for column views on row-major matrices, where \b Blaze can use the according // row instead of a column in order to provide maximum performance. // // \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read // access), which introduces absolutely no performance penalty, using a Hermitian matrix on the // left-hand side of an assignment (i.e. 
for write access) may introduce additional overhead when // it is assigned a general matrix, which is not Hermitian at compile time: \code using blaze::DynamicMatrix; using blaze::HermitianMatrix; HermitianMatrix< DynamicMatrix< complex<double> > > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the Hermitian matrix; no performance penalty C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead \endcode // When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary // to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property // of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as // possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is // therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n // In this context it is especially noteworthy that in contrast to additions and subtractions the // multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix: \code HermitianMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a Hermitian matrix; no runtime overhead C = A - B; // Results in a Hermitian matrix; no runtime overhead C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead \endcode // \n Previous: \ref adaptors_symmetric_matrices &nbsp; &nbsp; Next: \ref adaptors_triangular_matrices */ //************************************************************************************************* //**Triangular Matrices**************************************************************************** /*!\page adaptors_triangular_matrices Triangular Matrices // // \tableofcontents // // // \n \section adaptors_triangular_matrices_general Triangular Matrices // <hr> // // 
Triangular matrices come in three flavors: Lower triangular matrices provide the compile time // guarantee to be square matrices and that the upper part of the matrix contains only default // elements that cannot be modified. Upper triangular matrices on the other hand provide the // compile time guarantee to be square and that the lower part of the matrix contains only fixed // default elements. Finally, diagonal matrices provide the compile time guarantee to be square // and that both the lower and upper part of the matrix contain only immutable default elements. // These properties can be exploited to gain higher performance and/or to save memory. Within the // \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized // by the following class templates: // // Lower triangular matrices: // - <b>\ref adaptors_triangular_matrices_lowermatrix</b> // - <b>\ref adaptors_triangular_matrices_unilowermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b> // // Upper triangular matrices: // - <b>\ref adaptors_triangular_matrices_uppermatrix</b> // - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b> // - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b> // // Diagonal matrices // - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b> // // // \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix // <hr> // // The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower // triangular matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/LowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class LowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower matrices: \code using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using blaze::columnMajor; // Definition of a 3x3 row-major dense lower matrix with static memory blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A; // Definition of a resizable column-major dense lower matrix based on HybridMatrix blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B; // Definition of a resizable row-major dense lower matrix based on DynamicMatrix blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C; // Definition of a fixed size row-major dense lower matrix based on CustomMatrix blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D; // Definition of a compressed row-major single precision lower matrix blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E; \endcode // The storage order of a lower matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified // as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix // <hr> // // The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements above the diagonal are 0 (lower unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 1 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible lower unitriangular matrices: \code // Definition of a 3x3 row-major dense unilower matrix with static memory blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense unilower matrix based on HybridMatrix blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense unilower matrix based on DynamicMatrix blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision unilower matrix blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a lower unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the unilower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix // <hr> // // The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements above the diagonal are 0 (strictly lower triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & 0 & 0 & \cdots & 0 \\ l_{1,0} & 0 & 0 & \cdots & 0 \\ l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyLowerMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyLowerMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly lower triangular matrices: \code // Definition of a 3x3 row-major dense strictly lower matrix with static memory blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly lower matrix blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly lower triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix. // Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly lower matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix // <hr> // // The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types. 
// It inherits the properties and the interface of the given matrix type \c MT and extends it by // enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper // triangular matrix): \f[\left(\begin{array}{*{5}{c}} u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & u_{2,2} & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & u_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible upper matrices: \code // Definition of a 3x3 row-major dense upper matrix with static memory blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense upper matrix based on HybridMatrix blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense upper matrix based on DynamicMatrix blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision upper matrix blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix // <hr> // // The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix // types. It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix // elements below the diagonal are 0 (upper unitriangular matrix): \f[\left(\begin{array}{*{5}{c}} 1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 1 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 1 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 1 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/UniUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class UniUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also, // the given matrix type must have numeric element types (i.e. all integral types except \c bool, // floating point and complex types). Note that the given matrix type must be either resizable (as // for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as // for instance blaze::StaticMatrix). 
// // The following examples give an impression of several possible upper unitriangular matrices: \code // Definition of a 3x3 row-major dense uniupper matrix with static memory blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense uniupper matrix based on HybridMatrix blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision uniupper matrix blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of an upper unitriangular matrix is depending on the storage order of the // adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the uniupper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix // <hr> // // The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all diagonal matrix elements and all matrix // elements below the diagonal are 0 (strictly upper triangular matrix): \f[\left(\begin{array}{*{5}{c}} 0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\ 0 & 0 & u_{1,2} & \cdots & u_{1,N} \\ 0 & 0 & 0 & \cdots & u_{2,N} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & 0 \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/StrictlyUpperMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class StrictlyUpperMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used // with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix // type. Note that the given matrix type must be either resizable (as for instance // blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance // blaze::StaticMatrix). 
// // The following examples give an impression of several possible strictly upper triangular matrices: \code // Definition of a 3x3 row-major dense strictly upper matrix with static memory blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision strictly upper matrix blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a strictly upper triangular matrix is depending on the storage order of // the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. // is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix. // Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor), // the strictly upper matrix will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix // <hr> // // The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix // types. 
It inherits the properties and the interface of the given matrix type \c MT and extends // it by enforcing the additional invariant that all matrix elements above and below the diagonal // are 0 (diagonal matrix): \f[\left(\begin{array}{*{5}{c}} l_{0,0} & 0 & 0 & \cdots & 0 \\ 0 & l_{1,1} & 0 & \cdots & 0 \\ 0 & 0 & l_{2,2} & \cdots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & l_{N,N} \\ \end{array}\right).\f] // It can be included via the header file \code #include <blaze/math/DiagonalMatrix.h> \endcode // The type of the adapted matrix can be specified via the first template parameter: \code template< typename MT > class DiagonalMatrix; \endcode // \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any // non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note // that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or // blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix). // // The following examples give an impression of several possible diagonal matrices: \code // Definition of a 3x3 row-major dense diagonal matrix with static memory blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A; // Definition of a resizable column-major dense diagonal matrix based on HybridMatrix blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B; // Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C; // Definition of a compressed row-major single precision diagonal matrix blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D; \endcode // The storage order of a diagonal matrix is depending on the storage order of the adapted matrix // type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. 
is specified // as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the // adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix // will also be a column-major matrix. // // // \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices // <hr> // // A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT. // It also provides (nearly) the same interface as the underlying matrix type. However, there are // some important exceptions resulting from the triangular matrix constraint: // // -# <b>\ref adaptors_triangular_matrices_square</b> // -# <b>\ref adaptors_triangular_matrices_triangular</b> // -# <b>\ref adaptors_triangular_matrices_initialization</b> // -# <b>\ref adaptors_triangular_matrices_storage</b> // -# <b>\ref adaptors_triangular_matrices_scaling</b> // // \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square! 
// // In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix, // or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and // the \c extend() functions only expect a single parameter, which specifies both the number of // rows and columns, instead of two (one for the number of rows and one for the number of columns): \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; // Default constructed, default initialized, row-major 3x3 lower dynamic matrix LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 ); // Resizing the matrix to 5x5 A.resize( 5 ); // Extending the number of rows and columns by 2, resulting in a 7x7 matrix A.extend( 2 ); \endcode // In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number // of rows and number of columns must be specified equally: \code using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::columnMajor; // Correct setup of a fixed size column-major 3x3 lower static matrix LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A; // Compilation error: the provided matrix type is not a square matrix type LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B; \endcode // \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced! // // This means that it is only allowed to modify elements in the lower part or the diagonal of // a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix. // Unitriangular and strictly triangular matrices are even more restrictive and don't allow the // modification of diagonal elements. Also, triangular matrices can only be assigned matrices that // don't violate their triangular property. The following example demonstrates this restriction // by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types // see the according class documentations. 
\code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using CompressedLower = LowerMatrix< CompressedMatrix<double,rowMajor> >; // Default constructed, row-major 3x3 lower compressed matrix CompressedLower A( 3 ); // Initializing elements via the function call operator A(0,0) = 1.0; // Initialization of the diagonal element (0,0) A(2,0) = 2.0; // Initialization of the lower element (2,0) A(1,2) = 9.0; // Throws an exception; invalid modification of upper element // Inserting two more elements via the insert() function A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0) A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1) A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element // Appending an element via the append() function A.reserve( 1, 3 ); // Reserving enough capacity in row 1 A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1) A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part // Access via a non-const iterator CompressedLower::Iterator it = A.begin(1); *it = 6.0; // Modifies the lower element (1,0) ++it; *it = 9.0; // Modifies the diagonal element (1,1) // Erasing elements via the erase() function A.erase( 0, 0 ); // Erasing the diagonal element (0,0) A.erase( 2, 0 ); // Erasing the lower element (2,0) // Construction from a lower dense matrix StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK // Assignment of a non-lower dense matrix StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 }, { 8.0, 0.0, 0.0 }, { -2.0, -1.0, 4.0 } }; C = D; // Throws an exception; lower matrix invariant would be violated! 
\endcode // The triangular property is also enforced during the construction of triangular custom matrices: // In case the given array of elements does not represent the according triangular matrix type, a // \c std::invalid_argument exception is thrown: \code using blaze::CustomMatrix; using blaze::LowerMatrix; using blaze::unaligned; using blaze::unpadded; using blaze::rowMajor; using CustomLower = LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> >; // Creating a 3x3 lower custom matrix from a properly initialized array double array[9] = { 1.0, 0.0, 0.0, 2.0, 3.0, 0.0, 4.0, 5.0, 6.0 }; CustomLower A( array, 3UL ); // OK // Attempt to create a second 3x3 lower custom matrix from an uninitialized array std::unique_ptr<double[]> memory( new double[9UL] ); CustomLower B( memory.get(), 3UL ); // Throws an exception \endcode // Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...) // on the triangular matrix. The following example demonstrates that modifying the elements of an // entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements. // Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types // see the according class documentations. 
\code using blaze::DynamicMatrix; using blaze::LowerMatrix; // Setup of the lower matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 0 3 0 0 ) // ( 4 0 5 0 ) // LowerMatrix< DynamicMatrix<int> > A( 4 ); A(1,0) = 1; A(1,1) = 2; A(2,1) = 3; A(3,0) = 4; A(3,2) = 5; // Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix // // ( 0 0 0 0 ) // A = ( 1 2 0 0 ) // ( 9 9 9 0 ) // ( 4 0 5 0 ) // row( A, 2 ) = 9; // Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in // // ( 0 0 0 0 ) // A = ( 1 7 0 0 ) // ( 9 7 7 0 ) // ( 4 7 7 0 ) // submatrix( A, 0, 1, 4, 2 ) = 7; \endcode // The next example demonstrates the (compound) assignment to rows/columns and submatrices of // triangular matrices. Since only lower/upper and potentially diagonal elements may be modified // the matrix to be assigned must be structured such that the triangular matrix invariant of the // matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::LowerMatrix; using blaze::rowVector; // Setup of two default 4x4 lower matrices LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of a 4-dimensional vector // // v = ( 1 2 3 0 ) // DynamicVector<int,rowVector> v{ 1, 2, 3, 0 }; // OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant // // ( 0 0 0 0 ) // A1 = ( 0 0 0 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 2 ) = v; // OK // Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element // marked with X cannot be assigned and triggers an exception. // // ( 0 0 0 0 ) // A1 = ( 1 2 X 0 ) // ( 1 2 3 0 ) // ( 0 0 0 0 ) // row( A1, 1 ) = v; // Assignment throws an exception! 
// Setup of the 3x2 dynamic matrix // // ( 0 0 ) // B = ( 7 0 ) // ( 8 9 ) // DynamicMatrix<int> B( 3UL, 2UL, 0 ); B(1,0) = 7; B(2,0) = 8; B(2,1) = 9; // OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved // // ( 0 0 0 0 ) // A2 = ( 0 7 0 0 ) // ( 0 8 9 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be // preserved! The elements marked with X cannot be assigned without violating the invariant! // // ( 0 0 0 0 ) // A2 = ( 0 7 X 0 ) // ( 0 8 8 X ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception! \endcode // \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized! // // Although this results in a small loss of efficiency during the creation of a dense lower or // upper matrix this initialization is important since otherwise the lower/upper matrix property // of dense lower matrices would not be guaranteed: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Uninitialized, 5x5 row-major dynamic matrix DynamicMatrix<int,rowMajor> A( 5, 5 ); // 5x5 row-major lower dynamic matrix with default initialized upper matrix LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 ); // 7x7 column-major upper dynamic matrix with default initialized lower matrix UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 ); // 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 ); \endcode // \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements! // // All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable // elements in the lower or upper part, respectively. 
Therefore dense triangular matrices don't // provide any kind of memory reduction! There are two main reasons for this: First, storing also // the zero elements guarantees maximum performance for many algorithms that perform vectorized // operations on the triangular matrices, which is especially true for small dense matrices. // Second, conceptually all triangular adaptors merely restrict the interface to the matrix type // \c MT and do not change the data layout or the underlying matrix type. // // This property matters most for diagonal matrices. In order to achieve the perfect combination // of performance and memory consumption for a diagonal matrix it is recommended to use dense // matrices for small diagonal matrices and sparse matrices for large diagonal matrices: \code // Recommendation 1: use dense matrices for small diagonal matrices using SmallDiagonalMatrix = blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> >; // Recommendation 2: use sparse matrices for large diagonal matrices using LargeDiagonalMatrix = blaze::DiagonalMatrix< blaze::CompressedMatrix<float> >; \endcode // \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled! 
// // Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible // to self-scale such a matrix: \code using blaze::DynamicMatrix; using blaze::UniLowerMatrix; UniLowerMatrix< DynamicMatrix<int> > A( 4 ); A *= 2; // Compilation error; Scale operation is not available on an unilower matrix A /= 2; // Compilation error; Scale operation is not available on an unilower matrix A.scale( 2 ); // Compilation error; Scale function is not available on an unilower matrix A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix \endcode // \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations // <hr> // // A lower and upper triangular matrix can participate in numerical operations in any way any other // dense or sparse matrix can participate. It can also be combined with any other dense or sparse // vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix // within arithmetic operations: \code using blaze::LowerMatrix; using blaze::DynamicMatrix; using blaze::HybridMatrix; using blaze::StaticMatrix; using blaze::CompressedMatrix; using blaze::rowMajor; using blaze::columnMajor; DynamicMatrix<double,rowMajor> A( 3, 3 ); CompressedMatrix<double,rowMajor> B( 3, 3 ); LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 ); LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 ); LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E; LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F; E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check) F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check) F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check) C *= 2.0; // In-place scaling of matrix C E = 2.0 * B; // Scaling of matrix B (includes runtime 
check) F = C * 2.0; // Scaling of matrix C (only compile time check) E += A - B; // Addition assignment (includes runtime check) F -= C + D; // Subtraction assignment (only compile time check) F *= A * D; // Multiplication assignment (includes runtime check) \endcode // Note that it is possible to assign any kind of matrix to a triangular matrix. In case the // matrix to be assigned does not satisfy the invariants of the triangular matrix at compile // time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular // and strictly triangular matrix types can be used in the same way, but may pose some additional // restrictions (see the according class documentations). // // // \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices // <hr> // // It is also possible to use triangular block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::LowerMatrix; using blaze::UpperMatrix; // Definition of a 5x5 lower block matrix based on DynamicMatrix LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Definition of a 7x7 upper block matrix based on CompressedMatrix UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // Also in this case the triangular matrix invariant is enforced, i.e. 
it is not possible to // manipulate elements in the upper part (lower triangular matrix) or the lower part (upper // triangular matrix) of the matrix: \code const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 }, { 6, 8, -3 }, { 2, -1, 2 } }; A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception \endcode // Note that unitriangular matrices are restricted to numeric element types and therefore cannot // be used for block matrices: \code using blaze::CompressedMatrix; using blaze::DynamicMatrix; using blaze::StaticMatrix; using blaze::UniLowerMatrix; using blaze::UniUpperMatrix; // Compilation error: lower unitriangular matrices are restricted to numeric element types UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 ); // Compilation error: upper unitriangular matrices are restricted to numeric element types UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 ); \endcode // For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices. // // // \n \section adaptors_triangular_matrices_performance Performance Considerations // <hr> // // The \b Blaze library tries to exploit the properties of lower and upper triangular matrices // whenever and wherever possible. Therefore using triangular matrices instead of a general // matrices can result in a considerable performance improvement. However, there are also // situations when using a triangular matrix introduces some overhead. The following examples // demonstrate several common situations where triangular matrices can positively or negatively // impact performance. 
// // \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication // // When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the // fact that either the lower or upper part of the matrix contains only default elements and // restrict the algorithm to the non-zero elements. The following example demonstrates this by // means of a dense matrix/dense matrix multiplication with lower triangular matrices: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; using blaze::rowMajor; using blaze::columnMajor; LowerMatrix< DynamicMatrix<double,rowMajor> > A; LowerMatrix< DynamicMatrix<double,columnMajor> > B; DynamicMatrix<double,columnMajor> C; // ... Resizing and initialization C = A * B; \endcode // In comparison to a general matrix multiplication, the performance advantage is significant, // especially for large matrices. Therefore is it highly recommended to use the blaze::LowerMatrix // and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular, // respectively. Note however that the performance advantage is most pronounced for dense matrices // and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication // // A similar performance improvement can be gained when using a triangular matrix in a matrix/vector // multiplication: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; LowerMatrix< DynamicMatrix<double,rowMajor> > A; DynamicVector<double,columnVector> x, y; // ... Resizing and initialization y = A * x; \endcode // In this example, \b Blaze also exploits the structure of the matrix and approx. halves the // runtime of the multiplication. 
Also in case of matrix/vector multiplications the performance // improvement is most pronounced for dense matrices and much less so for sparse matrices. // // \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix // // In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for // read access), which introduces absolutely no performance penalty, using a triangular matrix // on the left-hand side of an assignment (i.e. for write access) may introduce additional // overhead when it is assigned a general matrix, which is not triangular at compile time: \code using blaze::DynamicMatrix; using blaze::LowerMatrix; LowerMatrix< DynamicMatrix<double> > A, C; DynamicMatrix<double> B; B = A; // Only read-access to the lower matrix; no performance penalty C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead \endcode // When assigning a general (potentially not lower triangular) matrix to a lower matrix or a // general (potentially not upper triangular) matrix to an upper matrix it is necessary to check // whether the matrix is lower or upper at runtime in order to guarantee the triangular property // of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as // efficiently as possible, if it is not, an exception is thrown. 
In order to prevent this runtime // overhead it is therefore generally advisable to assign lower or upper triangular matrices to // other lower or upper triangular matrices.\n // In this context it is especially noteworthy that the addition, subtraction, and multiplication // of two triangular matrices of the same structure always results in another triangular matrix: \code LowerMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a lower matrix; no runtime overhead C = A - B; // Results in a lower matrix; no runtime overhead C = A * B; // Results in a lower matrix; no runtime overhead \endcode \code UpperMatrix< DynamicMatrix<double> > A, B, C; C = A + B; // Results in a upper matrix; no runtime overhead C = A - B; // Results in a upper matrix; no runtime overhead C = A * B; // Results in a upper matrix; no runtime overhead \endcode // \n Previous: \ref adaptors_hermitian_matrices &nbsp; &nbsp; Next: \ref views */ //************************************************************************************************* //**Views****************************************************************************************** /*!\page views Views // // \tableofcontents // // // \section views_general General Concepts // <hr> // // Views represents parts of a vector or matrix, such as a subvector, a submatrix, or a specific // row, column, or band of a matrix. As such, views act as a reference to specific elements of // a vector or matrix. This reference is valid and can be used in every way as any other vector // or matrix can be used as long as the referenced vector or matrix is not resized or entirely // destroyed. Views also act as alias to the elements of the vector or matrix: Changes made to the // elements (e.g. modifying values, inserting or erasing elements) via the view are immediately // visible in the vector or matrix and changes made via the vector or matrix are immediately // visible in the view. 
// // It is also possible to create nested views (compound views), such as for instance bands of // submatrices or row selections on column selections. A compound view also acts as reference // to specific elements of the underlying vector or matrix and is valid as long as the underlying, // referenced vector or matrix is not resized or entirely destroyed. // // The \b Blaze library provides the following views on vectors and matrices: // // Vector views: // - \ref views_subvectors // - \ref views_element_selections // // Matrix views: // - \ref views_submatrices // - \ref views_rows // - \ref views_row_selections // - \ref views_columns // - \ref views_column_selections // - \ref views_bands // // // \n \section views_examples Examples \code using blaze::DynamicMatrix; using blaze::StaticVector; // Setup of the 3x5 row-major matrix DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 }, { 0, 2, 5, -1, -1 }, { 1, 0, 0, 2, 1 } }; // Setup of the 2-dimensional row vector StaticVector<int,2UL,rowVector> vec{ 18, 19 }; // Assigning to the elements (1,2) and (1,3) via a subvector of a row // // ( 1 0 -2 3 0 ) // ( 0 2 18 19 -1 ) // ( 1 0 0 2 1 ) // subvector( row( A, 1UL ), 2UL, 2UL ) = vec; // Switching rows 0 and 2 of A // // ( 1 0 0 2 1 ) // ( 0 2 18 19 -1 ) // ( 1 0 -2 3 0 ) // rows<0,2>( A ) = rows<2,0>( A ); // Warning: It is the programmer's responsibility to ensure the view does not outlive // the viewed vector or matrix (dangling reference)! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 } } ); \endcode // \n Previous: \ref adaptors_triangular_matrices &nbsp; &nbsp; Next: \ref views_subvectors */ //************************************************************************************************* //**Subvectors************************************************************************************* /*!\page views_subvectors Subvectors // // \tableofcontents // // // Subvectors provide views on a specific part of a dense or sparse vector. 
As such, subvectors // act as a reference to a specific range within a vector. This reference is valid and can be // used in every way any other dense or sparse vector can be used as long as the vector containing // the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the // vector elements in the specified range: Changes made to the elements (e.g. modifying values, // inserting or erasing elements) are immediately visible in the vector and changes made via the // vector are immediately visible in the subvector. // // // \n \section views_subvectors_setup Setup of Subvectors // <hr> // // A view on a dense or sparse subvector can be created very conveniently via the \c subvector() // function. It can be included via the header file \code #include <blaze/math/Subvector.h> \endcode // The first parameter specifies the offset of the subvector within the underlying dense or sparse // vector, the second parameter specifies the size of the subvector. The two parameters can be // specified either at compile time or at runtime: \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Create a subvector from index 4 with a size of 12 (i.e. in the range [4..15]) (compile time arguments) auto sv1 = subvector<4UL,12UL>( x ); // Create a subvector from index 8 with a size of 16 (i.e. in the range [8..23]) (runtime arguments) auto sv2 = subvector( x, 8UL, 16UL ); \endcode // The \c subvector() function returns an expression representing the subvector view. The type of // this expression depends on the given subvector arguments, primarily the type of the vector and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using VectorType = blaze::DynamicVector<int>; using SubvectorType = decltype( blaze::subvector<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. 
it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A subvector created // from a row vector can be used as any other row vector, a subvector created from a column vector // can be used as any other column vector. The view can also be used on both sides of an assignment: // The subvector can either be used as an alias to grant write access to a specific subvector of a // vector primitive on the left-hand side of an assignment or to grant read-access to a specific // subvector of a vector primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Create a subvector from index 0 with a size of 10 (i.e. in the range [0..9]) auto sv = subvector( x, 0UL, 10UL ); // Setting the first ten elements of x to the 2nd row of matrix A sv = row( A, 2UL ); // Setting the second ten elements of x to y subvector( x, 10UL, 10UL ) = y; // Setting the 3rd row of A to a subvector of x row( A, 3UL ) = subvector( x, 3UL, 10UL ); // Setting x to a subvector of the result of the addition between y and the 1st row of A x = subvector( y + row( A, 1UL ), 2UL, 5UL ); \endcode // \warning It is the programmer's responsibility to ensure the subvector does not outlive the // viewed vector: \code // Creating a subvector on a temporary vector; results in a dangling reference! auto sv = subvector<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_subvectors_element_access Element Access // <hr> // // The elements of a subvector can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... 
Resizing and initialization // Creating an 8-dimensional subvector, starting from index 4 auto sv = subvector( v, 4UL, 8UL ); // Setting the 1st element of the subvector, which corresponds to // the element at index 5 in vector v sv[1] = 2.0; \endcode // The numbering of the subvector elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the specified size of the subvector. Alternatively, the elements of a subvector can // be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // subvectors an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { *it = ...; // OK: Write access to the dense subvector value. ... = *it; // OK: Read access to the dense subvector value. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense subvector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating a reference to a specific subvector of vector v auto sv = subvector( v, 16UL, 64UL ); // Traversing the elements via iterators to non-const elements for( auto it=sv.begin(); it!=sv.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=sv.cbegin(); it!=sv.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_subvectors_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse subvector can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 auto sv = subvector( v, 10UL, 60UL ); // View on the range [10..69] of v // The subscript operator provides access to all possible elements of the sparse subvector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse subvector, the element is inserted into the // subvector. sv[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the subvector it is inserted into the subvector, if it is already contained // in the subvector its value is modified. sv.set( 45UL, -1.2 ); // An alternative for inserting elements into the subvector is the insert() function. However, // it inserts the element only in case the element is not already contained in the subvector. sv.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. 
In // case of subvectors, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the subvector and that the subvector's // capacity is large enough to hold the new element. Note however that due to the nature of // a subvector, which may be an alias to the middle of a sparse vector, the append() function // does not work as efficiently for a subvector as it does for a vector. sv.reserve( 10UL ); sv.append( 51UL, -2.1 ); \endcode // \n \section views_subvectors_common_operations Common Operations // <hr> // // A subvector view can be used like any other dense or sparse vector. This means that with // only a few exceptions all \ref vector_operations and \ref arithmetic_operations can be used. // For instance, the current number of elements can be obtained via the \c size() function, the // current capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since subvectors are references to a specific range of a // vector, several operations are not possible, such as resizing and swapping. The following // example shows this by means of a dense subvector view: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... 
Resizing and initialization // Creating a view on the range [5..15] of vector v auto sv = subvector( v, 5UL, 10UL ); sv.size(); // Returns the number of elements in the subvector sv.capacity(); // Returns the capacity of the subvector sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector auto sv2 = subvector( v, 15UL, 10UL ); swap( sv, sv2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_subvectors_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse subvectors can be used in all arithmetic operations that any other dense // or sparse vector can be used in. The following example gives an impression of the use of dense // subvectors within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... 
Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; auto sv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1 sv = d2; // Dense vector initialization of the range [0..9] subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19] d3 = sv + d2; // Dense vector/dense vector addition s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition d2 = sv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6] d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9] d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9] subvector( d1, 0UL , 10UL ) += d2; // Addition assignment subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment subvector( d1, 20UL, 10UL ) *= sv; // Multiplication assignment double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors \endcode // \n \section views_aligned_subvectors Aligned Subvectors // <hr> // // Usually subvectors can be defined anywhere within a vector. They may start at any position and // may have an arbitrary size (only restricted by the size of the underlying vector). However, in // contrast to vectors themselves, which are always properly aligned in memory and therefore can // provide maximum performance, this means that subvectors in general have to be considered to be // unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicVector<double,blaze::rowVector> x; // ... 
Resizing and initialization // Identical creations of an unaligned subvector in the range [8..23] auto sv1 = subvector ( x, 8UL, 16UL ); auto sv2 = subvector<unaligned>( x, 8UL, 16UL ); auto sv3 = subvector<8UL,16UL> ( x ); auto sv4 = subvector<unaligned,8UL,16UL>( x ); \endcode // All of these calls to the \c subvector() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide // full flexibility in the creation of subvectors, this might result in performance disadvantages // in comparison to vector primitives (even in case the specified subvector could be aligned). // Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a vector might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned subvectors. Aligned subvectors are identical to // unaligned subvectors in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying vector. Aligned subvectors are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of the subvector must be aligned. 
The following source code gives some examples // for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double // values into a SIMD vector: \code using blaze::aligned; blaze::DynamicVector<double,blaze::columnVector> d( 17UL ); // ... Resizing and initialization // OK: Starts at the beginning, i.e. the first element is aligned auto dsv1 = subvector<aligned>( d, 0UL, 13UL ); // OK: Start index is a multiple of 4, i.e. the first element is aligned auto dsv2 = subvector<aligned>( d, 4UL, 7UL ); // OK: The start index is a multiple of 4 and the subvector includes the last element auto dsv3 = subvector<aligned>( d, 8UL, 9UL ); // Error: Start index is not a multiple of 4, i.e. the first element is not aligned auto dsv4 = subvector<aligned>( d, 5UL, 8UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense subvectors. // In contrast, aligned sparse subvectors at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned subvector is created: \code using blaze::aligned; blaze::CompressedVector<double,blaze::rowVector> x; // ... Resizing and initialization // Creating an aligned subvector in the range [8..23] auto sv1 = subvector<aligned>( x, 8UL, 16UL ); auto sv2 = subvector<aligned,8UL,16UL>( x ); \endcode // \n Previous: \ref views &nbsp; &nbsp; Next: \ref views_element_selections */ //************************************************************************************************* //**Element Selections***************************************************************************** /*!\page views_element_selections Element Selections // // \tableofcontents // // // Element selections provide views on arbitrary compositions of elements of dense and sparse // vectors. 
These views act as a reference to the selected elements and represent them as another // dense or sparse vector. This reference is valid and can be used in every way any other dense // or sparse vector can be used as long as the vector containing the elements is not resized or // entirely destroyed. The element selection also acts as an alias to the vector elements in the // specified range: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the vector and changes made via the vector are immediately // visible in the elements. // // // \n \section views_element_selections_setup Setup of Element Selections // // An element selection can be created very conveniently via the \c elements() function. It can // be included via the header file \code #include <blaze/math/Elements.h> \endcode // The indices of the elements to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicVector<double,blaze::rowVector> x; // ... Resizing and initialization // Selecting the elements 4, 6, 8, and 10 (compile time arguments) auto e1 = elements<4UL,6UL,8UL,10UL>( x ); // Selecting the elements 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto e2 = elements( x, { 3UL, 2UL, 1UL } ); auto e3 = elements( x, list ); // Selecting the elements 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto e4 = elements( x, array ); auto e5 = elements( x, array.data(), array.size() ); // Selecting the element 4 fives times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto e6 = elements( x, vector ); auto e7 = elements( x, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the elements of the underlying vector in any order. 
Also note // that it is possible to use the same index multiple times. The \c elements() function returns an // expression representing the view on the selected elements. The type of this expression depends // on the given arguments, primarily the type of the vector and the compile time arguments. If the // type is required, it can be determined via the \c decltype specifier: \code using VectorType = blaze::DynamicVector<int>; using ElementsType = decltype( blaze::elements<4UL,12UL>( std::declval<VectorType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse vector, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. An element selection // created from a row vector can be used as any other row vector, an element selection created // from a column vector can be used as any other column vector. The view can also be used on both // sides of an assignment: It can either be used as an alias to grant write access to specific // elements of a vector primitive on the left-hand side of an assignment or to grant read-access // to specific elements of a vector primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Selecting the elements 1, 3, 5, and 7 auto e = elements( x, { 1UL, 3UL, 5UL, 7UL } ); // Setting the elements 1, 3, 5, and 7 of x to the 2nd row of matrix A e = row( A, 2UL ); // Setting the elements 2, 4, 6, and 8 of x to y elements( x, { 2UL, 4UL, 6UL, 8UL } ) = y; // Setting the 3rd row of A to the elements 5, 4, 3, and 2 of x row( A, 3UL ) = elements( x, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between y and the 1st row of A x = elements( y + row( A, 1UL ), { 2UL, 3UL, 0UL, 1UL } ) \endcode // Please note that using an element selection, which refers to an index multiple times, on the // left-hand side of an assignment leads to undefined behavior: \code blaze::DynamicVector<int,blaze::rowVector> a{ 1, 2, 3 }; blaze::DynamicVector<int,blaze::rowVector> b{ 1, 2, 3, 4 }; auto e = elements( a, { 1, 1, 1, 1 } ); // Selecting the element 1 four times e = b; // Undefined behavior \endcode // In this example both vectors have the same size, which results in a correct vector assignment, // but the final value of the element at index 1 is unspecified. // // \warning It is the programmer's responsibility to ensure the element selection does not outlive // the viewed vector: \code // Creating an element selection on a temporary vector; results in a dangling reference! auto e = elements<1UL,3UL>( DynamicVector<int>{ 1, 2, 3, 4, 5 } ); \endcode // \n \section views_element_selections_element_access Element Access // // The elements of an element selection can be directly accessed via the subscript operator: \code blaze::DynamicVector<double,blaze::rowVector> v; // ... 
Resizing and initialization // Selecting the elements 2, 4, 6, and 8 auto e = elements( v, { 2UL, 4UL, 6UL, 8UL } ); // Setting the 1st element of the element selection, which corresponds to // the element at index 4 in vector v e[1] = 2.0; \endcode // The numbering of the selected elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of selected elements. Alternatively, the elements of an element selection // can be traversed via iterators. Just as with vectors, in case of non-const element selections, // \c begin() and \c end() return an iterator, which allows to manipulate the elements, in case of // constant element selections an iterator to immutable elements is returned: \code blaze::DynamicVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of dense vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { *it = ...; // OK: Write access to the dense vector value. ... = *it; // OK: Read access to the dense vector value. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense vector value. } \endcode \code blaze::CompressedVector<int,blaze::rowVector> v( 256UL ); // ... Resizing and initialization // Creating an element selection including specific elements of sparse vector v auto e = elements( v, { 0UL, 3UL, 6UL, 9UL, 12UL } ); // Traversing the elements via iterators to non-const elements for( auto it=e.begin(); it!=e.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=e.cbegin(); it!=e.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_element_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse element selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedVector<double,blaze::rowVector> v( 256UL ); // Non-initialized vector of size 256 std::vector<size_t> indices; // ... Selecting indices of the sparse vector auto e = elements( v, indices ); // The subscript operator provides access to the selected elements of the sparse vector, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse vector, the element is inserted. e[42] = 2.0; // The second operation for inserting elements via the element selection is the set() function. // In case the element is not contained in the vector it is inserted into the vector, if it is // already contained in the vector its value is modified. e.set( 45UL, -1.2 ); // An alternative for inserting elements into the vector is the insert() function. However, it // inserts the element only in case the element is not already contained in the vector. e.insert( 50UL, 3.7 ); // Just as in case of vectors, elements can also be inserted via the append() function. 
In case // of element selections, append() also requires that the appended element's index is strictly // larger than the currently largest non-zero index of the selection and that the selection's // capacity is large enough to hold the new element. Note however that due to the nature of an // element selection, which is an alias to arbitrary elements of a sparse vector, the append() // function does not work as efficiently for an element selection as it does for a vector. e.reserve( 10UL ); e.append( 51UL, -2.1 ); \endcode // \n \section views_element_selections_common_operations Common Operations // // An element selection can be used like any other dense or sparse vector. For instance, the // number of selected elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since element selections are references to a specific range of a vector, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of an element selection on a dense vector: \code blaze::DynamicVector<int,blaze::rowVector> v( 42UL ); // ... Resizing and initialization // Selecting the elements 5 and 10 auto e = elements( v, { 5UL, 10UL } ); e.size(); // Returns the number of elements in the element selection e.capacity(); // Returns the capacity of the element selection e.nonZeros(); // Returns the number of non-zero elements contained in the element selection e.resize( 84UL ); // Compilation error: Cannot resize an element selection auto e2 = elements( v, { 15UL, 10UL } ); swap( e, e2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_element_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse element selections can be used in all arithmetic operations that any other // dense or sparse vector can be used in. 
The following example gives an impression of the use of // dense element selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and sparse // element selections with fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> d1, d2, d3; blaze::CompressedVector<double,blaze::rowVector> s1, s2; // ... Resizing and initialization blaze::DynamicMatrix<double,blaze::rowMajor> A; std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto e( elements( d1, indices1 ) ); // Selecting the every third element of d1 in the range [0..21] e = d2; // Dense vector assignment to the selected elements elements( d1, indices2 ) = s1; // Sparse vector assignment to the selected elements d3 = e + d2; // Dense vector/dense vector addition s2 = s1 + elements( d1, indices2 ); // Sparse vector/dense vector addition d2 = e * elements( d1, indices3 ); // Component-wise vector multiplication elements( d1, indices2 ) *= 2.0; // In-place scaling of the second selection of elements d2 = elements( d1, indices3 ) * 2.0; // Scaling of the elements in the third selection of elements d2 = 2.0 * elements( d1, indices3 ); // Scaling of the elements in the third selection of elements elements( d1, indices1 ) += d2; // Addition assignment elements( d1, indices2 ) -= s2; // Subtraction assignment elements( d1, indices3 ) *= e; // Multiplication assignment double scalar = elements( d1, indices2 ) * trans( s1 ); // Scalar/dot/inner product between two vectors A = trans( s1 ) * elements( d1, { 3UL, 6UL } ); // Outer product between two vectors \endcode // \n Previous: \ref views_subvectors &nbsp; &nbsp; Next: \ref views_submatrices */ 
//************************************************************************************************* //**Submatrices************************************************************************************ /*!\page views_submatrices Submatrices // // \tableofcontents // // // Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors // provide views on specific parts of vectors. As such, submatrices act as a reference to a // specific block within a matrix. This reference is valid and can be used in every way any // other dense or sparse matrix can be used as long as the matrix containing the submatrix is // not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements // in the specified block: Changes made to the elements (e.g. modifying values, inserting or // erasing elements) are immediately visible in the matrix and changes made via the matrix are // immediately visible in the submatrix. // // // \n \section views_submatrices_setup Setup of Submatrices // <hr> // // A view on a dense or sparse submatrix can be created very conveniently via the \c submatrix() // function. It can be included via the header file \code #include <blaze/math/Submatrix.h> \endcode // The first and second parameter specify the row and column of the first element of the submatrix. // The third and fourth parameter specify the number of rows and columns, respectively. The four // parameters can be specified either at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a dense submatrix of size 4x8, starting in row 3 and column 0 (compile time arguments) auto sm1 = submatrix<3UL,0UL,4UL,8UL>( A ); // Creating a dense submatrix of size 8x16, starting in row 0 and column 4 (runtime arguments) auto sm2 = submatrix( A, 0UL, 4UL, 8UL, 16UL ); \endcode // The \c submatrix() function returns an expression representing the submatrix view. 
The type of // this expression depends on the given submatrix arguments, primarily the type of the matrix and // the compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using SubmatrixType = decltype( blaze::submatrix<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. A submatrix created from // a row-major matrix will itself be a row-major matrix, a submatrix created from a column-major // matrix will be a column-major matrix. The view can also be used on both sides of an assignment: // The submatrix can either be used as an alias to grant write access to a specific submatrix // of a matrix primitive on the left-hand side of an assignment or to grant read-access to // a specific submatrix of a matrix primitive or expression on the right-hand side of an // assignment. The following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Creating a dense submatrix of size 8x4, starting in row 0 and column 2 auto sm = submatrix( A, 0UL, 2UL, 8UL, 4UL ); // Setting the submatrix of A to a 8x4 submatrix of B sm = submatrix( B, 0UL, 0UL, 8UL, 4UL ); // Copying the sparse matrix C into another 8x4 submatrix of A submatrix( A, 8UL, 2UL, 8UL, 4UL ) = C; // Assigning part of the result of a matrix addition to the first submatrix sm = submatrix( B + C, 0UL, 0UL, 8UL, 4UL ); \endcode // \warning It is the programmer's responsibility to ensure the submatrix does not outlive the // viewed matrix: \code // Creating a submatrix on a temporary matrix; results in a dangling reference! 
auto sm = submatrix<1UL,0UL,2UL,3UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_submatrices_element_access Element Access // <hr> // // The elements of a submatrix can be directly accessed with the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating an 8x8 submatrix, starting from position (4,4) auto sm = submatrix( A, 4UL, 4UL, 8UL, 8UL ); // Setting the element (0,0) of the submatrix, which corresponds to // the element at position (4,4) in matrix A sm(0,0) = 2.0; \endcode // Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as // with matrices, in case of non-const submatrices, \c begin() and \c end() return an iterator, // which allows to manipulate the elements, in case of constant submatrices an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { *it = ...; // OK: Write access to the dense submatrix value. ... = *it; // OK: Read access to the dense submatrix value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense submatrix value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... 
Resizing and initialization // Creating a reference to a specific submatrix of matrix A auto sm = submatrix( A, 16UL, 16UL, 64UL, 128UL ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=sm.begin(0); it!=sm.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=sm.cbegin(1); it!=sm.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_submatrices_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse submatrix can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A // The function call operator provides access to all possible elements of the sparse submatrix, // including the zero elements. In case the function call operator is used to access an element // that is currently not stored in the sparse submatrix, the element is inserted into the // submatrix. sm(2,4) = 2.0; // The second operation for inserting elements is the set() function. 
In case the element is // not contained in the submatrix it is inserted into the submatrix, if it is already contained // in the submatrix its value is modified. sm.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the submatrix is the insert() function. However, // it inserts the element only in case the element is not already contained in the submatrix. sm.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of submatrices, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // or column of the submatrix and that the according row's or column's capacity is large // enough to hold the new element. Note however that due to the nature of a submatrix, which // may be an alias to the middle of a sparse matrix, the append() function does not work as // efficiently for a submatrix as it does for a matrix. sm.reserve( 2UL, 10UL ); sm.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_submatrices_common_operations Common Operations // <hr> // // A submatrix view can be used like any other dense or sparse matrix. This means that with only // a few exceptions all \ref matrix_operations and \ref arithmetic_operations can be used. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // submatrices are views on a specific submatrix of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a view on an 8x12 submatrix of matrix A auto sm = submatrix( A, 0UL, 0UL, 8UL, 12UL ); sm.rows(); // Returns the number of rows of the submatrix sm.columns(); // Returns the number of columns of the submatrix sm.capacity(); // Returns the capacity of the submatrix sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix auto sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL ); swap( sm, sm2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_submatrices_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse submatrices can be used in all arithmetic operations that any other dense // or sparse matrix can be used in. The following example gives an impression of the use of dense // submatrices within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse matrices with // fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization auto sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1 // starting from row 0 and column 0 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix // starting in row 0 and column 8 sm = S1; // Sparse matrix initialization of the second 8x8 submatrix D3 = sm + D2; // Dense matrix/dense matrix addition S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1 D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1 D2 = 2.0 * sm; // Scaling of a submatrix of D1 submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_aligned_submatrices Aligned Submatrices // <hr> // // Usually submatrices can be defined anywhere within a matrix. They may start at any position and // may have an arbitrary extension (only restricted by the extension of the underlying matrix). // However, in contrast to matrices themselves, which are always properly aligned in memory and // therefore can provide maximum performance, this means that submatrices in general have to be // considered to be unaligned. This can be made explicit by the \c blaze::unaligned flag: \code using blaze::unaligned; blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0 auto sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL ); auto sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sm3 = submatrix<0UL,0UL,8UL,8UL> ( A ); auto sm4 = submatrix<unaligned,0UL,0UL,8UL,8UL>( A ); \endcode // All of these calls to the \c submatrix() function are identical. Whether the alignment flag is // explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide // full flexibility in the creation of submatrices, this might result in performance disadvantages // in comparison to matrix primitives (even in case the specified submatrix could be aligned). // Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum // performance in all operations, a general view on a matrix might not be properly aligned. This // may cause a performance penalty on some platforms and/or for some operations. // // However, it is also possible to create aligned submatrices. Aligned submatrices are identical to // unaligned submatrices in all aspects, except that they may pose additional alignment restrictions // and therefore have less flexibility during creation, but don't suffer from performance penalties // and provide the same performance as the underlying matrix. Aligned submatrices are created by // explicitly specifying the \c blaze::aligned flag: \code using blaze::aligned; // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv1 = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); auto sv2 = submatrix<aligned,0UL,0UL,8UL,8UL>( A ); \endcode // The alignment restrictions refer to system dependent address restrictions for the used element // type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the // first element of each row/column of the submatrix must be aligned. 
The following source code // gives some examples for a double precision row-major dynamic matrix, assuming that padding is // enabled and that AVX is available, which packs 4 \c double values into a SIMD vector: \code using blaze::aligned; blaze::DynamicMatrix<double,blaze::rowMajor> D( 13UL, 17UL ); // ... Resizing and initialization // OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding) auto dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL ); // OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding) auto dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL ); // OK: First column is a multiple of 4 and the submatrix includes the last row and column auto dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL ); // Error: First column is not a multiple of 4, i.e. the first element is not aligned auto dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL ); \endcode // Note that the discussed alignment restrictions are only valid for aligned dense submatrices. // In contrast, aligned sparse submatrices at this time don't pose any additional restrictions. // Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case // the \c blaze::aligned flag is specified during setup, an aligned submatrix is created: \code using blaze::aligned; blaze::CompressedMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating an aligned submatrix of size 8x8, starting in row 0 and column 0 auto sv = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL ); \endcode // \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices // // Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template): \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of a 16x16 symmetric matrix SymmetricMatrix< DynamicMatrix<int> > A( 16UL ); // Creating a dense submatrix of size 8x12, starting in row 2 and column 4 auto sm = submatrix( A, 2UL, 4UL, 8UL, 12UL ); \endcode // It is important to note, however, that (compound) assignments to such submatrices have a // special restriction: The symmetry of the underlying symmetric matrix must not be broken! // Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the // element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry // of the symmetric matrix is preserved. Otherwise a \a std::invalid_argument exception is // thrown: \code using blaze::DynamicMatrix; using blaze::SymmetricMatrix; // Setup of two default 4x4 symmetric matrices SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 ); // Setup of the 3x2 dynamic matrix // // ( 1 2 ) // B = ( 3 4 ) // ( 5 6 ) // DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; // OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved // // ( 0 0 1 2 ) // A1 = ( 0 0 3 4 ) // ( 1 3 5 6 ) // ( 2 4 6 0 ) // submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK // Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved! // The elements marked with X cannot be assigned unambiguously! // // ( 0 1 2 0 ) // A2 = ( 1 3 X 0 ) // ( 2 X 6 0 ) // ( 0 0 0 0 ) // submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception! 
\endcode // \n Previous: \ref views_element_selections &nbsp; &nbsp; Next: \ref views_rows */ //************************************************************************************************* //**Rows******************************************************************************************* /*!\page views_rows Rows // // \tableofcontents // // // Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a // reference to a specific row. This reference is valid and can be used in every way any other // row vector can be used as long as the matrix containing the row is not resized or entirely // destroyed. The row also acts as an alias to the row elements: Changes made to the elements // (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix // and changes made via the matrix are immediately visible in the row. // // // \n \section views_rows_setup Setup of Rows // <hr> // // \image html row.png // \image latex row.eps "Row view" width=250pt // // A reference to a dense or sparse row can be created very conveniently via the \c row() function. // It can be included via the header file \code #include <blaze/math/Row.h> \endcode // The row index must be in the range from \f$[0..M-1]\f$, where \c M is the total number of rows // of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st row of matrix A (compile time index) auto row1 = row<1UL>( A ); // Creating a reference to the 2nd row of matrix A (runtime index) auto row2 = row( A, 2UL ); \endcode // The \c row() function returns an expression representing the row view. The type of this // expression depends on the given row arguments, primarily the type of the matrix and the compile // time arguments. 
If the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowType = decltype( blaze::row<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other row vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. The reference can also be used on // both sides of an assignment: The row can either be used as an alias to grant write access to a // specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access // to a specific row of a matrix primitive or expression on the right-hand side of an assignment. // The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... Resizing and initialization // Setting the 2nd row of matrix A to x auto row2 = row( A, 2UL ); row2 = x; // Setting the 3rd row of matrix B to y row( B, 3UL ) = y; // Setting x to the 4th row of the result of the matrix multiplication x = row( A * B, 4UL ); // Setting y to the 2nd row of the result of the sparse matrix multiplication y = row( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the row does not outlive the viewed // matrix: \code // Creating a row on a temporary matrix; results in a dangling reference! auto row1 = row<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_rows_element_access Element Access // <hr> // // The elements of a row can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... 
Resizing and initialization // Creating a view on the 4th row of matrix A auto row4 = row( A, 4UL ); // Setting the 1st element of the dense row, which corresponds // to the 1st element in the 4th row of matrix A row4[1] = 2.0; \endcode // The numbering of the row elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of columns of the referenced matrix. Alternatively, the elements of a // row can be traversed via iterators. Just as with vectors, in case of non-const rows, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // rows an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { *it = ...; // OK; Write access to the dense row value ... = *it; // OK: Read access to the dense row value. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = *it; // OK: Read access to the dense row value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st row of matrix A auto row31 = row( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=row31.begin(); it!=row31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... 
= it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=row31.cbegin(); it!=row31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_rows_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse row can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A // The subscript operator provides access to all possible elements of the sparse row, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse row, the element is inserted into the row. row0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the row it is inserted into the row, if it is already contained in // the row its value is modified. row0.set( 45UL, -1.2 ); // An alternative for inserting elements into the row is the insert() function. However, // it inserts the element only in case the element is not already contained in the row. row0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse row is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the row and that the row's capacity is large // enough to hold the new element. 
row0.reserve( 10UL ); row0.append( 51UL, -2.1 ); \endcode // \n \section views_rows_common_operations Common Operations // <hr> // // A row view can be used like any other row vector. This means that with only a few exceptions // all \ref vector_operations and \ref arithmetic_operations can be used. For instance, the // current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since rows are references to specific rows of a matrix, several operations // are not possible on views, such as resizing and swapping. The following example shows this by // means of a dense row view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd row of matrix A auto row2 = row( A, 2UL ); row2.size(); // Returns the number of elements in the row row2.capacity(); // Returns the capacity of the row row2.nonZeros(); // Returns the number of non-zero elements contained in the row row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix auto row3 = row( A, 3UL ); swap( row2, row3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_rows_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse rows can be used in all arithmetic operations that any other dense or // sparse row vector can be used in. The following example gives an impression of the use of // dense rows within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse rows with // fitting element types: \code blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::rowVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto row0( row( A, 0UL ) ); // Reference to the 0th row of A row0[0] = 0.0; // Manual initialization of the 0th row of A row0[1] = 0.0; row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A b = row0 + a; // Dense vector/dense vector addition b = c + row( A, 1UL ); // Sparse vector/dense vector addition b = row0 * row( A, 2UL ); // Component-wise vector multiplication row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row b = row( A, 1UL ) * 2.0; // Scaling of the 1st row b = 2.0 * row( A, 1UL ); // Scaling of the 1st row row( A, 2UL ) += a; // Addition assignment row( A, 2UL ) -= c; // Subtraction assignment row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors A = trans( c ) * row( A, 1UL ); // Outer product between two vectors \endcode // \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that row views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... 
Resizing and initialization // Creating a reference to the 1st row of a column-major matrix A auto row1 = row( A, 1UL ); for( auto it=row1.begin(); it!=row1.end(); ++it ) { // ... } \endcode // However, please note that creating a row view on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row view on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th row of the multiplication between A and B ... blaze::DynamicVector<double,blaze::rowVector> x = row( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // the 15th row of the column-major matrix A with B. blaze::DynamicVector<double,blaze::rowVector> x = row( A, 15UL ) * B; \endcode // Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as possible // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_submatrices &nbsp; &nbsp; Next: \ref views_row_selections */ //************************************************************************************************* //**Row Selections********************************************************************************* /*!\page views_row_selections Row Selections // // \tableofcontents // // // Row selections provide views on arbitrary compositions of rows of dense and sparse matrices. // These views act as a reference to the selected rows and represent them as another dense or // sparse matrix. 
This reference is valid and can be used in every way any other dense or sparse // matrix can be used as long as the matrix containing the rows is not resized or entirely // destroyed. The row selection also acts as an alias to the matrix elements in the specified // range: Changes made to the rows (e.g. modifying values, inserting or erasing elements) are // immediately visible in the matrix and changes made via the matrix are immediately visible // in the rows. // // // \n \section views_row_selections_setup Setup of Row Selections // // A row selection can be created very conveniently via the \c rows() function. It can be included // via the header file \code #include <blaze/math/Rows.h> \endcode // The indices of the rows to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Selecting the rows 4, 6, 8, and 10 (compile time arguments) auto rs1 = rows<4UL,6UL,8UL,10UL>( A ); // Selecting the rows 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto rs2 = rows( A, { 3UL, 2UL, 1UL } ); auto rs3 = rows( A, list ); // Selecting the rows 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto rs4 = rows( A, array ); auto rs5 = rows( A, array.data(), array.size() ); // Selecting the row 4 fives times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto rs6 = rows( A, vector ); auto rs7 = rows( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the rows of the underlying matrix in any order. Also note // that it is possible to use the same index multiple times. The \c rows() function returns an // expression representing the view on the selected rows. 
The type of this expression depends // on the given arguments, primarily the type of the matrix and the compile time arguments. If // the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using RowsType = decltype( blaze::rows<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // row selection will always be treated as a row-major matrix, regardless of the storage order of // the matrix containing the rows. The view can also be used on both sides of an assignment: It // can either be used as an alias to grant write access to specific rows of a matrix primitive // on the left-hand side of an assignment or to grant read-access to specific rows of a matrix // primitive or expression on the right-hand side of an assignment. The following example // demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; blaze::DynamicMatrix<double,blaze::columnMajor> B; blaze::CompressedMatrix<double,blaze::rowMajor> C; // ... Resizing and initialization // Selecting the rows 1, 3, 5, and 7 of A auto rs = rows( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting rows 1, 3, 5, and 7 of A to row 4 of B rs = rows( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the rows 2, 4, 6, and 8 of A to C rows( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 rows of A to the rows 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, 4UL, A.columns() ) = rows( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between rows 1, 3, 5, and 7 of A and C B = rows( rs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the row selection does not outlive the // viewed matrix: \code // Creating a row selection on a temporary matrix; results in a dangling reference! 
auto rs = rows<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_row_selections_element_access Element Access // // The elements of a row selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the first four rows of A in reverse order auto rs = rows( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the row selection, which corresponds // to the element at position (3,0) in matrix A rs(0,0) = 2.0; \endcode // Alternatively, the elements of a row selection can be traversed via (const) iterators. Just as // with matrices, in case of non-const row selection, \c begin() and \c end() return an iterator, // which allows to manipuate the elements, in case of constant row selection an iterator to // immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 256UL, 512UL ); // ... 
Resizing and initialization // Creating a reference to a selection of rows of matrix A auto rs = rows( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th row via iterators to non-const elements for( auto it=rs.begin(0); it!=rs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st row via iterators to const elements for( auto it=rs.cbegin(1); it!=rs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_row_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse row selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 256UL, 512UL ); // Non-initialized matrix of size 256x512 auto rs = rows( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the rows 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse row // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse row selection, the element // is inserted into the row selection. rs(2,4) = 2.0; // The second operation for inserting elements is the set() function. 
In case the element is // not contained in the row selection it is inserted into the row selection, if it is already // contained in the row selection its value is modified. rs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the row selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // row selection. rs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of row selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according row // of the row selection and that the according row's capacity is large enough to hold the new // element. Note however that due to the nature of a row selection, which may be an alias to // an arbitrary collection of rows, the append() function does not work as efficiently for // a row selection as it does for a matrix. rs.reserve( 2UL, 10UL ); rs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_row_selections_common_operations Common Operations // // A view on specific rows of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // row selections are views on specific rows of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a view on the rows 8, 16, 24, and 32 of matrix A auto rs = rows( A, { 8UL, 16UL, 24UL, 32UL } ); rs.rows(); // Returns the number of rows of the row selection rs.columns(); // Returns the number of columns of the row selection rs.capacity(); // Returns the capacity of the row selection rs.nonZeros(); // Returns the number of non-zero elements contained in the row selection rs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a row selection auto rs2 = rows( A, 9UL, 17UL, 25UL, 33UL ); swap( rs, rs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_row_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse row selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use // of dense row selctions within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::rowMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::rowMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... 
Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto rs = rows( D1, indices1 ); // Selecting the every third row of D1 in the range [0..21] rs = D2; // Dense matrix assignment to the selected rows rows( D1, indices2 ) = S1; // Sparse matrix assignment to the selected rows D3 = rs + D2; // Dense matrix/dense matrix addition S2 = S1 - rows( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = rs % rows( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = rows( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication rows( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of rows D2 = rows( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of rows D2 = 2.0 * rows( D1, indices3 ); // Scaling of the elements in the third selection of rows rows( D1, indices1 ) += D2; // Addition assignment rows( D1, indices2 ) -= S1; // Subtraction assignment rows( D1, indices3 ) %= rs; // Schur product assignment a = rows( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_row_selections_on_column_major_matrix Row Selections on Column-Major Matrices // // Especially noteworthy is that row selections can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. For // instance: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 64UL, 32UL ); // ... 
Resizing and initialization // Creating a reference to the 1st and 3rd row of a column-major matrix A auto rs = rows( A, { 1UL, 3UL } ); // Traversing row 0 of the selection, which corresponds to the 1st row of matrix A for( auto it=rs.begin( 0UL ); it!=rs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a row selection on a matrix stored in a column-major fashion // can result in a considerable performance decrease in comparison to a row selection on a matrix // with row-major storage format. This is due to the non-contiguous storage of the matrix elements. // Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two column-major matrices blaze::DynamicMatrix<double,blaze::columnMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::columnMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th row of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // the 15th, 30th, and 45th row of the column-major matrix A with B. blaze::DynamicMatrix<double,blaze::rowMajor> x = rows( A, { 15UL, 30UL, 45UL } ) * B; \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible // using a row-major storage order for matrix \c A would result in a more efficient evaluation. // // \n Previous: \ref views_rows &nbsp; &nbsp; Next: \ref views_columns */ //************************************************************************************************* //**Columns**************************************************************************************** /*!\page views_columns Columns // // \tableofcontents // // // Just as rows provide a view on a specific row of a matrix, columns provide views on a specific // column of a dense or sparse matrix. 
As such, columns act as a reference to a specific column. // This reference is valid an can be used in every way any other column vector can be used as long // as the matrix containing the column is not resized or entirely destroyed. Changes made to the // elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the // matrix and changes made via the matrix are immediately visible in the column. // // // \n \section views_colums_setup Setup of Columns // <hr> // // \image html column.png // \image latex column.eps "Column view" width=250pt // // A reference to a dense or sparse column can be created very conveniently via the \c column() // function. It can be included via the header file \code #include <blaze/math/Column.h> \endcode // The column index must be in the range from \f$[0..N-1]\f$, where \c N is the total number of // columns of the matrix, and can be specified both at compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a reference to the 1st column of matrix A (compile time index) auto col1 = column<1UL>( A ); // Creating a reference to the 2nd column of matrix A (runtime index) auto col2 = column( A, 2UL ); \endcode // The \c column() function returns an expression representing the column view. The type of this // expression depends on the given column arguments, primarily the type of the matrix and the // compile time arguments. If the type is required, it can be determined via the \c decltype // specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnType = decltype( blaze::column<1UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other column vector, i.e. it can be assigned to, it // can be copied from, and it can be used in arithmetic operations. 
The reference can also be used // on both sides of an assignment: The column can either be used as an alias to grant write access // to a specific column of a matrix primitive on the left-hand side of an assignment or to grant // read-access to a specific column of a matrix primitive or expression on the right-hand side // of an assignment. The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::columnVector> x; blaze::CompressedVector<double,blaze::columnVector> y; blaze::DynamicMatrix<double,blaze::columnMajor> A, B; blaze::CompressedMatrix<double,blaze::columnMajor> C, D; // ... Resizing and initialization // Setting the 1st column of matrix A to x auto col1 = column( A, 1UL ); col1 = x; // Setting the 4th column of matrix B to y column( B, 4UL ) = y; // Setting x to the 2nd column of the result of the matrix multiplication x = column( A * B, 2UL ); // Setting y to the 2nd column of the result of the sparse matrix multiplication y = column( C * D, 2UL ); \endcode // \warning It is the programmer's responsibility to ensure the column does not outlive the // viewed matrix: \code // Creating a column on a temporary matrix; results in a dangling reference! auto col1 = column<1UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_columns_element_access Element Access // <hr> // // The elements of a column can be directly accessed with the subscript operator. \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the 4th column of matrix A auto col4 = column( A, 4UL ); // Setting the 1st element of the dense column, which corresponds // to the 1st element in the 4th column of matrix A col4[1] = 2.0; \endcode // The numbering of the column elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of rows of the referenced matrix. 
Alternatively, the elements of a column // can be traversed via iterators. Just as with vectors, in case of non-const columns, \c begin() // and \c end() return an iterator, which allows to manipulate the elements, in case of constant // columns an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { *it = ...; // OK; Write access to the dense column value ... = *it; // OK: Read access to the dense column value. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense column value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 31st column of matrix A auto col31 = column( A, 31UL ); // Traversing the elements via iterators to non-const elements for( auto it=col31.begin(); it!=col31.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=col31.cbegin(); it!=col31.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. 
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_columns_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse column can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 100UL, 10UL ); // Non-initialized 100x10 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A // The subscript operator provides access to all possible elements of the sparse column, // including the zero elements. In case the subscript operator is used to access an element // that is currently not stored in the sparse column, the element is inserted into the column. col0[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the column it is inserted into the column, if it is already contained // in the column its value is modified. col0.set( 45UL, -1.2 ); // An alternative for inserting elements into the column is the insert() function. However, // it inserts the element only in case the element is not already contained in the column. col0.insert( 50UL, 3.7 ); // A very efficient way to add new elements to a sparse column is the append() function. // Note that append() requires that the appended element's index is strictly larger than // the currently largest non-zero index of the column and that the column's capacity is // large enough to hold the new element. col0.reserve( 10UL ); col0.append( 51UL, -2.1 ); \endcode // \n \section views_columns_common_operations Common Operations // <hr> // // A column view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. 
For instance, // the current number of elements can be obtained via the \c size() function, the current capacity // via the \c capacity() function, and the number of non-zero elements via the \c nonZeros() // function. However, since columns are references to specific columns of a matrix, several // operations are not possible on views, such as resizing and swapping. The following example // shows this by means of a dense column view: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a reference to the 2nd column of matrix A auto col2 = column( A, 2UL ); col2.size(); // Returns the number of elements in the column col2.capacity(); // Returns the capacity of the column col2.nonZeros(); // Returns the number of non-zero elements contained in the column col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix auto col3 = column( A, 3UL ); swap( col2, col3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_columns_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse columns can be used in all arithmetic operations that any other dense or // sparse column vector can be used in. The following example gives an impression of the use of // dense columns within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) 
can be performed on all possible combinations of dense and sparse columns with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::columnMajor> A( 2UL, 4UL ); // Non-initialized 2x4 matrix auto col0( column( A, 0UL ) ); // Reference to the 0th column of A col0[0] = 0.0; // Manual initialization of the 0th column of A col0[1] = 0.0; column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A b = col0 + a; // Dense vector/dense vector addition b = c + column( A, 1UL ); // Sparse vector/dense vector addition b = col0 * column( A, 2UL ); // Component-wise vector multiplication column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column b = column( A, 1UL ) * 2.0; // Scaling of the 1st column b = 2.0 * column( A, 1UL ); // Scaling of the 1st column column( A, 2UL ) += a; // Addition assignment column( A, 2UL ) -= c; // Subtraction assignment column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors A = column( A, 1UL ) * trans( c ); // Outer product between two vectors \endcode // \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order // <hr> // // Especially noteworthy is that column views can be created for both row-major and column-major // matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly // and the interface of a column-major matrix only allows to traverse a column, via views it is // possible to traverse a row of a column-major matrix or a column of a row-major matrix. 
For // instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st column of a column-major matrix A auto col1 = column( A, 1UL ); for( auto it=col1.begin(); it!=col1.end(); ++it ) { // ... } \endcode // However, please note that creating a column view on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column view on a matrix // with column-major storage format. This is due to the non-contiguous storage of the matrix // elements. Therefore care has to be taken in the choice of the most suitable storage order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th column of the multiplication between A and B ... blaze::DynamicVector<double,blaze::columnVector> x = column( A * B, 15UL ); // ... is essentially the same as the following computation, which multiplies // A with the 15th column of the row-major matrix B. blaze::DynamicVector<double,blaze::columnVector> x = A * column( B, 15UL ); \endcode // Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as possible // using a column-major storage order for matrix \c B would result in a more efficient evaluation. // // \n Previous: \ref views_row_selections &nbsp; &nbsp; Next: \ref views_column_selections */ //************************************************************************************************* //**Column Selections****************************************************************************** /*!\page views_column_selections Column Selections // // \tableofcontents // // // Column selections provide views on arbitrary compositions of columns of dense and sparse // matrices. 
These views act as a reference to the selected columns and represent them as another // dense or sparse matrix. This reference is valid and can be used in every way any other dense // or sparse matrix can be used as long as the matrix containing the columns is not resized or // entirely destroyed. The column selection also acts as an alias to the matrix elements in the // specified range: Changes made to the columns (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the columns. // // // \n \section views_column_selections_setup Setup of Column Selections // // A column selection can be created very conveniently via the \c columns() function. It can be // included via the header file \code #include <blaze/math/Columns.h> \endcode // The indices of the columns to be selected can be specified either at compile time or at runtime // (by means of an initializer list, array or vector): \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Selecting the columns 4, 6, 8, and 10 (compile time arguments) auto cs1 = columns<4UL,6UL,8UL,10UL>( A ); // Selecting the columns 3, 2, and 1 (runtime arguments via an initializer list) const std::initializer_list<size_t> list{ 3UL, 2UL, 1UL }; auto cs2 = columns( A, { 3UL, 2UL, 1UL } ); auto cs3 = columns( A, list ); // Selecting the columns 1, 2, 3, 3, 2, and 1 (runtime arguments via a std::array) const std::array<size_t,6UL> array{ 1UL, 2UL, 3UL, 3UL, 2UL, 1UL }; auto cs4 = columns( A, array ); auto cs5 = columns( A, array.data(), array.size() ); // Selecting the column 4 five times (runtime arguments via a std::vector) const std::vector<size_t> vector{ 4UL, 4UL, 4UL, 4UL, 4UL }; auto cs6 = columns( A, vector ); auto cs7 = columns( A, vector.data(), vector.size() ); \endcode // Note that it is possible to alias the columns of the underlying matrix in any order. 
Also note // that it is possible to use the same index multiple times. The \c columns() function returns an // expression representing the view on the selected columns. The type of this expression depends // on the given arguments, primarily the type of the matrix and the compile time arguments. If // the type is required, it can be determined via the \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using ColumnsType = decltype( blaze::columns<3UL,0UL,4UL,8UL>( std::declval<MatrixType>() ) ); \endcode // The resulting view can be treated as any other dense or sparse matrix, i.e. it can be assigned // to, it can be copied from, and it can be used in arithmetic operations. Note, however, that a // column selection will always be treated as a column-major matrix, regardless of the storage // order of the matrix containing the columns. The view can also be used on both sides of an // assignment: It can either be used as an alias to grant write access to specific columns of a // matrix primitive on the left-hand side of an assignment or to grant read-access to specific // columns of a matrix primitive or expression on the right-hand side of an assignment. The // following example demonstrates this in detail: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; blaze::DynamicMatrix<double,blaze::rowMajor> B; blaze::CompressedMatrix<double,blaze::columnMajor> C; // ... 
Resizing and initialization // Selecting the columns 1, 3, 5, and 7 of A auto cs = columns( A, { 1UL, 3UL, 5UL, 7UL } ); // Setting columns 1, 3, 5, and 7 of A to column 4 of B cs = columns( B, { 4UL, 4UL, 4UL, 4UL } ); // Setting the columns 2, 4, 6, and 8 of A to C columns( A, { 2UL, 4UL, 6UL, 8UL } ) = C; // Setting the first 4 columns of A to the columns 5, 4, 3, and 2 of C submatrix( A, 0UL, 0UL, A.rows(), 4UL ) = columns( C, { 5UL, 4UL, 3UL, 2UL } ); // Rotating the result of the addition between columns 1, 3, 5, and 7 of A and C B = columns( cs + C, { 2UL, 3UL, 0UL, 1UL } ); \endcode // \warning It is the programmer's responsibility to ensure the column selection does not outlive // the viewed matrix: \code // Creating a column selection on a temporary matrix; results in a dangling reference! auto cs = columns<2UL,0UL>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_column_selections_element_access Element Access // // The elements of a column selection can be directly accessed via the function call operator: \code blaze::DynamicMatrix<double,blaze::columnMajor> A; // ... Resizing and initialization // Creating a view on the first four columns of A in reverse order auto cs = columns( A, { 3UL, 2UL, 1UL, 0UL } ); // Setting the element (0,0) of the column selection, which corresponds // to the element at position (0,3) in matrix A cs(0,0) = 2.0; \endcode // Alternatively, the elements of a column selection can be traversed via (const) iterators. // Just as with matrices, in case of non-const column selection, \c begin() and \c end() return // an iterator, which allows to manipulate the elements, in case of constant column selection an // iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... 
Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { *it = ...; // OK: Write access to the dense value. ... = *it; // OK: Read access to the dense value. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense value. } \endcode \code blaze::CompressedMatrix<int,blaze::columnMajor> A( 512UL, 256UL ); // ... Resizing and initialization // Creating a reference to a selection of columns of matrix A auto cs = columns( A, { 16UL, 32UL, 64UL, 128UL } ); // Traversing the elements of the 0th column via iterators to non-const elements for( auto it=cs.begin(0); it!=cs.end(0); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements of the 1st column via iterators to const elements for( auto it=cs.cbegin(1); it!=cs.cend(1); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. 
} \endcode // \n \section views_column_selections_element_insertion Element Insertion // // Inserting/accessing elements in a sparse column selection can be done by several alternative // functions. The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::columnMajor> A( 512UL, 256UL ); // Non-initialized matrix of size 512x256 auto cs = columns( A, { 10UL, 20UL, 30UL, 40UL } ); // View on the columns 10, 20, 30, and 40 of A // The function call operator provides access to all possible elements of the sparse column // selection, including the zero elements. In case the function call operator is used to // access an element that is currently not stored in the sparse column selection, the element // is inserted into the column selection. cs(2,4) = 2.0; // The second operation for inserting elements is the set() function. In case the element is // not contained in the column selection it is inserted into the column selection, if it is // already contained in the column selection its value is modified. cs.set( 2UL, 5UL, -1.2 ); // An alternative for inserting elements into the column selection is the insert() function. // However, it inserts the element only in case the element is not already contained in the // column selection. cs.insert( 2UL, 6UL, 3.7 ); // Just as in the case of sparse matrices, elements can also be inserted via the append() // function. In case of column selections, append() also requires that the appended element's // index is strictly larger than the currently largest non-zero index in the according column // of the column selection and that the according column's capacity is large enough to hold the // new element. Note however that due to the nature of a column selection, which may be an alias // to an arbitrary collection of columns, the append() function does not work as efficiently // for a column selection as it does for a matrix. 
cs.reserve( 2UL, 10UL ); cs.append( 2UL, 10UL, -2.1 ); \endcode // \n \section views_column_selections_common_operations Common Operations // // A view on specific columns of a matrix can be used like any other dense or sparse matrix. For // instance, the current size of the matrix, i.e. the number of rows or columns can be obtained // via the \c rows() and \c columns() functions, the current total capacity via the \c capacity() // function, and the number of non-zero elements via the \c nonZeros() function. However, since // column selections are views on specific columns of a matrix, several operations are not possible, // such as resizing and swapping: \code blaze::DynamicMatrix<int,blaze::columnMajor> A( 42UL, 42UL ); // ... Resizing and initialization // Creating a view on the columns 8, 16, 24, and 32 of matrix A auto cs = columns( A, { 8UL, 16UL, 24UL, 32UL } ); cs.rows(); // Returns the number of rows of the column selection cs.columns(); // Returns the number of columns of the column selection cs.capacity(); // Returns the capacity of the column selection cs.nonZeros(); // Returns the number of non-zero elements contained in the column selection cs.resize( 10UL, 8UL ); // Compilation error: Cannot resize a column selection auto cs2 = columns( A, { 9UL, 17UL, 25UL, 33UL } ); swap( cs, cs2 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_column_selections_arithmetic_operations Arithmetic Operations // // Both dense and sparse column selections can be used in all arithmetic operations that any other // dense or sparse matrix can be used in. The following example gives an impression of the use of // dense column selections within arithmetic operations. All operations (addition, subtraction, // multiplication, scaling, ...) 
can be performed on all possible combinations of dense and // sparse matrices with fitting element types: \code blaze::DynamicMatrix<double,blaze::columnMajor> D1, D2, D3; blaze::CompressedMatrix<double,blaze::columnMajor> S1, S2; blaze::CompressedVector<double,blaze::columnVector> a, b; // ... Resizing and initialization std::initializer_list<size_t> indices1{ 0UL, 3UL, 6UL, 9UL, 12UL, 15UL, 18UL, 21UL }; std::initializer_list<size_t> indices2{ 1UL, 4UL, 7UL, 10UL, 13UL, 16UL, 19UL, 22UL }; std::initializer_list<size_t> indices3{ 2UL, 5UL, 8UL, 11UL, 14UL, 17UL, 20UL, 23UL }; auto cs = columns( D1, indices1 ); // Selecting the every third column of D1 in the range [0..21] cs = D2; // Dense matrix assignment to the selected columns columns( D1, indices2 ) = S1; // Sparse matrix assignment to the selected columns D3 = cs + D2; // Dense matrix/dense matrix addition S2 = S1 - columns( D1, indices2 ); // Sparse matrix/dense matrix subtraction D2 = cs % columns( D1, indices3 ); // Dense matrix/dense matrix Schur product D2 = columns( D1, indices2 ) * D1; // Dense matrix/dense matrix multiplication columns( D1, indices2 ) *= 2.0; // In-place scaling of the second selection of columns D2 = columns( D1, indices3 ) * 2.0; // Scaling of the elements in the third selection of columns D2 = 2.0 * columns( D1, indices3 ); // Scaling of the elements in the third selection of columns columns( D1, indices1 ) += D2; // Addition assignment columns( D1, indices2 ) -= S1; // Subtraction assignment columns( D1, indices3 ) %= cs; // Schur product assignment a = columns( D1, indices1 ) * b; // Dense matrix/sparse vector multiplication \endcode // \n \section views_column_selections_on_row_major_matrix Column Selections on a Row-Major Matrix // // Especially noteworthy is that column selections can be created for both row-major and // column-major matrices. 
Whereas the interface of a row-major matrix only allows to traverse a // row directly and the interface of a column-major matrix only allows to traverse a column, via // views it is possible to traverse a row of a column-major matrix or a column of a row-major // matrix. For instance: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 64UL, 32UL ); // ... Resizing and initialization // Creating a reference to the 1st and 3rd column of a column-major matrix A auto cs = columns( A, { 1UL, 3UL } ); // Traversing column 0 of the selection, which corresponds to the 1st column of matrix A for( auto it=cs.begin( 0UL ); it!=cs.end( 0UL ); ++it ) { // ... } \endcode // However, please note that creating a column selection on a matrix stored in a row-major fashion // can result in a considerable performance decrease in comparison to a column selection on a // matrix with column-major storage format. This is due to the non-contiguous storage of the // matrix elements. Therefore care has to be taken in the choice of the most suitable storage // order: \code // Setup of two row-major matrices blaze::DynamicMatrix<double,blaze::rowMajor> A( 128UL, 128UL ); blaze::DynamicMatrix<double,blaze::rowMajor> B( 128UL, 128UL ); // ... Resizing and initialization // The computation of the 15th, 30th, and 45th column of the multiplication between A and B ... blaze::DynamicMatrix<double,blaze::columnMajor> x = columns( A * B, { 15UL, 30UL, 45UL } ); // ... is essentially the same as the following computation, which multiplies // A with the 15th, 30th, and 45th column of the row-major matrix B. blaze::DynamicMatrix<double,blaze::columnMajor> x = A * columns( B, { 15UL, 30UL, 45UL } ); \endcode // Although \b Blaze performs the resulting matrix/matrix multiplication as efficiently as possible, // using a column-major storage order for matrix \c B would result in a more efficient evaluation. 
// // \n Previous: \ref views_columns &nbsp; &nbsp; Next: \ref views_bands */ //************************************************************************************************* //**Bands****************************************************************************************** /*!\page views_bands Bands // // \tableofcontents // // // Bands provide views on a specific band of a dense or sparse matrix (e.g. the diagonal, the // subdiagonal, ...). As such, bands act as a reference to a specific band. This reference // is valid and can be used in every way any other vector can be used as long as the matrix // containing the band is not resized or entirely destroyed. The band also acts as an alias to // the band elements: Changes made to the elements (e.g. modifying values, inserting or erasing // elements) are immediately visible in the matrix and changes made via the matrix are immediately // visible in the band. // // // \n \section views_bands_setup Setup of Bands // <hr> // // \image html band.png // \image latex band.eps "Band view" width=250pt // // A reference to a dense or sparse band can be created very conveniently via the \c band() // function. It can be included via the header file \code #include <blaze/math/Band.h> \endcode // The band index must be in the range from \f$[min(0,1-M)..max(0,N-1)]\f$, where \c M is the // total number of rows and \c N is the total number of columns, and can be specified both at // compile time or at runtime: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the 1st lower band of matrix A (compile time index) auto band1 = band<-1L>( A ); // Creating a reference to the 2nd upper band of matrix A (runtime index) auto band2 = band( A, 2L ); \endcode // In addition, the \c diagonal() function provides a convenient shortcut for the setup of a view // on the diagonal of a dense or sparse matrix. 
It has the same effect as calling the \c band() // function with a compile time index of 0: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a reference to the diagonal of matrix A via the band() and diagonal() functions auto diag1 = band<0L>( A ); auto diag2 = diagonal( A ); static_assert( blaze::IsSame< decltype(diag1), decltype(diag2) >::value, "Non-identical types detected" ); \endcode // Both the \c band() and the \c diagonal() function return an expression representing the band // view. The type of this expression depends on the given arguments, primarily the type of the // matrix and the compile time arguments. If the type is required, it can be determined via // \c decltype specifier: \code using MatrixType = blaze::DynamicMatrix<int>; using BandType = decltype( blaze::band<1L>( std::declval<MatrixType>() ) ); using DiagonalType = decltype( blaze::diagonal( std::declval<MatrixType>() ) ); \endcode // This resulting view can be treated as any other vector, i.e. it can be assigned to, it can // be copied from, and it can be used in arithmetic operations. By default, bands are considered // column vectors, but this setting can be changed via the \c defaultTransposeFlag switch. The // reference can also be used on both sides of an assignment: The band can either be used as an // alias to grant write access to a specific band of a matrix primitive on the left-hand side of // an assignment or to grant read-access to a specific band of a matrix primitive or expression // on the right-hand side of an assignment. The following example demonstrates this in detail: \code blaze::DynamicVector<double,blaze::rowVector> x; blaze::CompressedVector<double,blaze::rowVector> y; blaze::DynamicMatrix<double,blaze::rowMajor> A, B; blaze::CompressedMatrix<double,blaze::rowMajor> C, D; // ... 
Resizing and initialization // Setting the 2nd upper band of matrix A to x auto band2 = band( A, 2L ); band2 = x; // Setting the 3rd upper band of matrix B to y band( B, 3L ) = y; // Setting x to the 2nd lower band of the result of the matrix multiplication x = band( A * B, -2L ); // Setting y to the 2nd upper band of the result of the sparse matrix multiplication y = band( C * D, 2L ); \endcode // \warning It is the programmer's responsibility to ensure the band does not outlive the viewed // matrix: \code // Creating a band on a temporary matrix; results in a dangling reference! auto band1 = band<1L>( DynamicMatrix<int>{ { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } } ); \endcode // \n \section views_bands_element_access Element Access // <hr> // // The elements of a band can be directly accessed with the subscript operator: \code blaze::DynamicMatrix<double,blaze::rowMajor> A; // ... Resizing and initialization // Creating a view on the 4th upper band of matrix A auto band4 = band( A, 4L ); // Setting the 1st element of the dense band, which corresponds // to the 1st element in the 4th upper band of matrix A band4[1] = 2.0; \endcode // The numbering of the band elements is \f[\left(\begin{array}{*{5}{c}} 0 & 1 & 2 & \cdots & N-1 \\ \end{array}\right),\f] // where N is the number of elements of the referenced band. Alternatively, the elements of a band // can be traversed via iterators. Just as with vectors, in case of non-const band, \c begin() and // \c end() return an iterator, which allows to manipulate the elements, in case of constant bands // an iterator to immutable elements is returned: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th upper band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { *it = ...; // OK: Write access to the dense band value ... 
= *it; // OK: Read access to the dense band value. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { *it = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = *it; // OK: Read access to the dense band value. } \endcode \code blaze::CompressedMatrix<int,blaze::rowMajor> A( 128UL, 256UL ); // ... Resizing and initialization // Creating a reference to the 5th band of matrix A auto band5 = band( A, 5L ); // Traversing the elements via iterators to non-const elements for( auto it=band5.begin(); it!=band5.end(); ++it ) { it->value() = ...; // OK: Write access to the value of the non-zero element. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } // Traversing the elements via iterators to const elements for( auto it=band5.cbegin(); it!=band5.cend(); ++it ) { it->value() = ...; // Compilation error: Assignment to the value via iterator-to-const is invalid. ... = it->value(); // OK: Read access to the value of the non-zero element. it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed. ... = it->index(); // OK: Read access to the index of the sparse element. } \endcode // \n \section views_bands_element_insertion Element Insertion // <hr> // // Inserting/accessing elements in a sparse band can be done by several alternative functions. // The following example demonstrates all options: \code blaze::CompressedMatrix<double,blaze::rowMajor> A( 10UL, 100UL ); // Non-initialized 10x100 matrix auto diag( band( A, 0L ) ); // Reference to the diagonal of A // The subscript operator provides access to all possible elements of the sparse band, // including the zero elements. 
In case the subscript operator is used to access an element // that is currently not stored in the sparse band, the element is inserted into the band. diag[42] = 2.0; // The second operation for inserting elements is the set() function. In case the element // is not contained in the band it is inserted into the band, if it is already contained in // the band its value is modified. diag.set( 45UL, -1.2 ); // An alternative for inserting elements into the band is the insert() function. However, // it inserts the element only in case the element is not already contained in the band. diag.insert( 50UL, 3.7 ); \endcode // \n \section views_bands_common_operations Common Operations // <hr> // // A band view can be used like any other column vector. This means that with only a few // exceptions all \ref vector_operations and \ref arithmetic_operations can be used. For instance, // the current number of band elements can be obtained via the \c size() function, the current // capacity via the \c capacity() function, and the number of non-zero elements via the // \c nonZeros() function. However, since bands are references to specific bands of a matrix, // several operations are not possible, such as resizing and swapping. The following example // shows this by means of a dense band view: \code blaze::DynamicMatrix<int,blaze::rowMajor> A( 42UL, 42UL ); // ... 
Resizing and initialization // Creating a reference to the 2nd upper band of matrix A auto band2 = band( A, 2L ); band2.size(); // Returns the number of elements in the band band2.capacity(); // Returns the capacity of the band band2.nonZeros(); // Returns the number of non-zero elements contained in the band band2.resize( 84UL ); // Compilation error: Cannot resize a single band of a matrix auto band3 = band( A, 3L ); swap( band2, band3 ); // Compilation error: Swap operation not allowed \endcode // \n \section views_bands_arithmetic_operations Arithmetic Operations // <hr> // // Both dense and sparse bands can be used in all arithmetic operations that any other dense or // sparse vector can be used in. The following example gives an impression of the use of dense // bands within arithmetic operations. All operations (addition, subtraction, multiplication, // scaling, ...) can be performed on all possible combinations of dense and sparse bands with // fitting element types: \code blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b; blaze::CompressedVector<double,blaze::columnVector> c( 2UL ); c[1] = 3.0; blaze::DynamicMatrix<double,blaze::rowMajor> A( 4UL, 2UL ); // Non-initialized 4x2 matrix auto band1( band( A, 1L ) ); // Reference to the 1st upper band of A auto diag ( band( A, 0L ) ); // Reference to the diagonal of A band1[0] = 0.0; // Manual initialization of the 1st upper band of A diag = 1.0; // Homogeneous initialization of the diagonal of A band( A, -1L ) = a; // Dense vector initialization of the 1st lower band of A band( A, -2L ) = c; // Sparse vector initialization of the 2nd lower band of A b = diag + a; // Dense vector/dense vector addition b = c + band( A, -1L ); // Sparse vector/dense vector addition b = diag * band( A, -2L ); // Component-wise vector multiplication band( A, -1L ) *= 2.0; // In-place scaling of the 1st upper band b = band( A, -1L ) * 2.0; // Scaling of the 1st upper band b = 2.0 * band( A, -1L ); // Scaling of the 
1st upper band band( A, -2L ) += a; // Addition assignment band( A, -2L ) -= c; // Subtraction assignment band( A, -2L ) *= band( A, 0L ); // Multiplication assignment double scalar = trans( c ) * band( A, -1L ); // Scalar/dot/inner product between two vectors A = band( A, -1L ) * trans( c ); // Outer product between two vectors \endcode // \n Previous: \ref views_column_selections &nbsp; &nbsp; Next: \ref arithmetic_operations */ //************************************************************************************************* //**Arithmetic Operations************************************************************************** /*!\page arithmetic_operations Arithmetic Operations // // \tableofcontents // // // \b Blaze provides the following arithmetic operations for vectors and matrices: // // <ul> // <li> \ref addition </li> // <li> \ref subtraction </li> // <li> \ref scalar_multiplication </li> // <li> \ref vector_vector_multiplication // <ul> // <li> \ref componentwise_multiplication </li> // <li> \ref inner_product </li> // <li> \ref outer_product </li> // <li> \ref cross_product </li> // </ul> // </li> // <li> \ref vector_vector_division </li> // <li> \ref matrix_vector_multiplication </li> // <li> \ref matrix_matrix_multiplication </li> // </ul> // // \n Previous: \ref views_bands &nbsp; &nbsp; Next: \ref addition */ //************************************************************************************************* //**Addition*************************************************************************************** /*!\page addition Addition // // The addition of vectors and matrices is as intuitive as the addition of scalar values. For both // the vector addition as well as the matrix addition the addition operator can be used. It even // enables the addition of dense and sparse vectors as well as the addition of dense and sparse // matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... 
Initializing the vectors v3 = v1 + v2; // Addition of two column vectors of different data type \endcode \code blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // add vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 + v2; // Compilation error: Cannot add a column vector and a row vector v1 + trans( v2 ); // OK: Addition of two column vectors \endcode // In case of matrices, however, it is possible to add row-major and column-major matrices. Note // however that in favor of performance the addition of two matrices with the same storage order // is favorable. The same argument holds for the element type: In case two vectors or matrices // with the same element type are added, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double> v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 + v2; // Vectorized addition of two double precision vectors \endcode \code blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... 
Initialization of the matrices M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices \endcode // \n Previous: \ref arithmetic_operations &nbsp; &nbsp; Next: \ref subtraction */ //************************************************************************************************* //**Subtraction************************************************************************************ /*!\page subtraction Subtraction // // The subtraction of vectors and matrices works exactly as intuitively as the addition, but with // the subtraction operator. For both the vector subtraction as well as the matrix subtraction // the subtraction operator can be used. It also enables the subtraction of dense and sparse // vectors as well as the subtraction of dense and sparse matrices: \code blaze::DynamicVector<int> v1( 5UL ), v3; blaze::CompressedVector<float> v2( 5UL ); // ... Initializing the vectors v3 = v1 - v2; // Subtraction of two column vectors of different data type blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL ); blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3; // ... Initializing the matrices M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type \endcode // Note that it is necessary that both operands have exactly the same dimensions. Violating this // precondition results in an exception. Also note that in case of vectors it is only possible to // subtract vectors with the same transpose flag: \code blaze::DynamicVector<int,columnVector> v1( 5UL ); blaze::CompressedVector<float,rowVector> v2( 5UL ); v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector v1 - trans( v2 ); // OK: Subtraction of two column vectors \endcode // In case of matrices, however, it is possible to subtract row-major and column-major matrices. // Note however that in favor of performance the subtraction of two matrices with the same storage // order is favorable. 
The same argument holds for the element type: In case two vectors or matrices // with the same element type are subtracted, the performance can be much higher due to vectorization // of the operation. \code blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3; // ... Initialization of the vectors v3 = v1 - v2; // Vectorized subtraction of two double precision vectors blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3; // ... Initialization of the matrices M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices \endcode // \n Previous: \ref addition &nbsp; &nbsp; Next: \ref scalar_multiplication */ //************************************************************************************************* //**Scalar Multiplication************************************************************************** /*!\page scalar_multiplication Scalar Multiplication // // The scalar multiplication is the multiplication of a scalar value with a vector or a matrix. // In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar // values. Additionally, it is possible to use std::complex values with the same built-in data // types as element type. \code blaze::StaticVector<int,3UL> v1{ 1, 2, 3 }; blaze::DynamicVector<double> v2 = v1 * 1.2; blaze::CompressedVector<float> v3 = -0.3F * v1; \endcode \code blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } }; blaze::DynamicMatrix<double> M2 = M1 * 1.2; blaze::CompressedMatrix<float> M3 = -0.3F * M1; \endcode // Vectors and matrices cannot be used as scalar values for scalar multiplications (see the // following example). 
However, each vector and matrix provides the \c scale() function, which // can be used to scale a vector or matrix element-wise with arbitrary scalar data types: \code blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1; blaze::StaticMatrix<int,3UL,3UL> scalar; M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication M1.scale( scalar ); // Scalar multiplication \endcode // \n Previous: \ref subtraction &nbsp; &nbsp; Next: \ref componentwise_multiplication */ //************************************************************************************************* //**Vector/Vector Multiplication******************************************************************* /*!\page vector_vector_multiplication Vector/Vector Multiplication // // \n \section componentwise_multiplication Componentwise Multiplication // <hr> // // Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or // blaze::rowVector) via the multiplication operator results in a componentwise multiplication // of the two vectors: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and // a dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row // vectors. The result is a dense row vector. 
\endcode // \n \section inner_product Inner Product / Scalar Product / Dot Product // <hr> // // The multiplication between a row vector and a column vector results in an inner product between // the two vectors: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; int result = v1 * v2; // Results in the value 15 \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; int result = v1 * trans( v2 ); // Also results in the value 15 \endcode // Alternatively, either the \c inner() function, the \c dot() function or the comma operator can // be used for any combination of vectors (row or column vectors) to perform an inner product: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; // All alternatives for the inner product between a column vector and a row vector int result1 = trans( v1 ) * trans( v2 ); int result2 = inner( v1, v2 ); int result3 = dot( v1, v2 ); int result4 = (v1,v2); \endcode // When using the comma operator, please note the brackets embracing the inner product expression. // Due to the low precedence of the comma operator (lower even than the assignment operator) these // brackets are strictly required for a correct evaluation of the inner product. 
// // // \n \section outer_product Outer Product // <hr> // // The multiplication between a column vector and a row vector results in the outer product of // the two vectors: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = v1 * v2; \endcode // The \c trans() function can be used to transpose a vector as necessary: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2; \endcode // Alternatively, the \c outer() function can be used for any combination of vectors (row or column // vectors) to perform an outer product: \code blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 }; blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 }; StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors \endcode // \n \section cross_product Cross Product // <hr> // // Two vectors with the same transpose flag can be multiplied via the cross product. The cross // product between two vectors \f$ a \f$ and \f$ b \f$ is defined as \f[ \left(\begin{array}{*{1}{c}} c_0 \\ c_1 \\ c_2 \\ \end{array}\right) = \left(\begin{array}{*{1}{c}} a_1 b_2 - a_2 b_1 \\ a_2 b_0 - a_0 b_2 \\ a_0 b_1 - a_1 b_0 \\ \end{array}\right). \f] // Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is // realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%) // can be used in case infix notation is required: \code blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 }; blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 }; blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) ); blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 ); \endcode // Please note that the cross product is restricted to three dimensional (dense and sparse) // column vectors. 
// // \n Previous: \ref scalar_multiplication &nbsp; &nbsp; Next: \ref vector_vector_division */ //************************************************************************************************* //**Vector/Vector Division************************************************************************* /*!\page vector_vector_division Vector/Vector Division // // \n \section componentwise_division Componentwise Division // <hr> // // Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector // or blaze::rowVector) via the division operator results in a componentwise division: \code using blaze::DynamicVector; using blaze::CompressedVector; CompressedVector<int,columnVector> v1( 17UL ); DynamicVector<int,columnVector> v2( 17UL ); StaticVector<double,10UL,rowVector> v3; DynamicVector<double,rowVector> v4( 10UL ); // ... Initialization of the vectors CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a // dense column vector. The result is a sparse // column vector. DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row // vectors. The result is a dense row vector. \endcode // Note that all values of the divisor must be non-zero and that no checks are performed to assert // this precondition! // // \n Previous: \ref vector_vector_multiplication &nbsp; &nbsp; Next: \ref matrix_vector_multiplication */ //************************************************************************************************* //**Matrix/Vector Multiplication******************************************************************* /*!\page matrix_vector_multiplication Matrix/Vector Multiplication // // In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical // textbooks. 
Just as in textbooks there are two different multiplications between a matrix and // a vector: a matrix/column vector multiplication and a row vector/matrix multiplication: \code using blaze::StaticVector; using blaze::DynamicVector; using blaze::DynamicMatrix; DynamicMatrix<int> M1( 39UL, 12UL ); StaticVector<int,12UL,columnVector> v1; // ... Initialization of the matrix and the vector DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication \endcode // Note that the storage order of the matrix poses no restrictions on the operation. Also note, // that the highest performance for a multiplication between a dense matrix and a dense vector can // be achieved if both the matrix and the vector have the same scalar element type. // // \n Previous: \ref vector_vector_division &nbsp; &nbsp; Next: \ref matrix_matrix_multiplication */ //************************************************************************************************* //**Matrix/Matrix Multiplication******************************************************************* /*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication // // \n \section schur_product Componentwise Multiplication / Schur Product // <hr> // // Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns) // via the modulo operator results in a componentwise multiplication (Schur product) of the two // matrices: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 28UL, 35UL ); CompressedMatrix<float> M2( 28UL, 35UL ); // ... 
Initialization of the matrices DynamicMatrix<double> M3 = M1 % M2; \endcode // \n \section matrix_product Matrix Product // <hr> // // The matrix/matrix product can be formulated exactly as in mathematical textbooks: \code using blaze::DynamicMatrix; using blaze::CompressedMatrix; DynamicMatrix<double> M1( 45UL, 85UL ); CompressedMatrix<float> M2( 85UL, 37UL ); // ... Initialization of the matrices DynamicMatrix<double> M3 = M1 * M2; \endcode // The storage order of the two matrices poses no restrictions on the operation, all variations // are possible. It is also possible to multiply two matrices with different element type, as // long as the element types themselves can be multiplied and added. Note however that the // highest performance for a multiplication between two matrices can be expected for two // matrices with the same scalar element type. // // In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper // triangular, or diagonal, the computation can be optimized by explicitly declaring the // multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by // means of the \ref matrix_operations_declaration_operations : \code using blaze::DynamicMatrix; DynamicMatrix<double> M1, M2, M3; // ... Initialization of the square matrices M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal \endcode // Using a declaration operation on a multiplication expression can speed up the computation // by a factor of 2. 
Note however that the caller of the according declaration operation takes // full responsibility for the correctness of the declaration. Falsely declaring a multiplication // as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined // behavior! // // \n Previous: \ref matrix_vector_multiplication &nbsp; &nbsp; Next: \ref shared_memory_parallelization */ //************************************************************************************************* //**Shared Memory Parallelization****************************************************************** /*!\page shared_memory_parallelization Shared Memory Parallelization // // For all possible operations \b Blaze tries to achieve maximum performance on a single CPU // core. However, today's CPUs are not single core anymore, but provide several (homogeneous // or heterogeneous) compute cores. In order to fully exploit the performance potential of a // multicore CPU, computations have to be parallelized across all available cores of a CPU. // For this purpose, \b Blaze provides four different shared memory parallelization techniques: // // - \ref openmp_parallelization // - \ref cpp_threads_parallelization // - \ref boost_threads_parallelization // - \ref hpx_parallelization // // When any of the shared memory parallelization techniques is activated, all arithmetic // operations on dense vectors and matrices (including additions, subtractions, multiplications, // divisions, and all componentwise arithmetic operations) and most operations on sparse vectors // and matrices are automatically run in parallel. 
However, in addition, \b Blaze provides means // to enforce the serial execution of specific operations: // // - \ref serial_execution // // \n Previous: \ref matrix_matrix_multiplication &nbsp; &nbsp; Next: \ref openmp_parallelization */ //************************************************************************************************* //**OpenMP Parallelization************************************************************************* /*!\page openmp_parallelization OpenMP Parallelization // // \tableofcontents // // // \n \section openmp_setup OpenMP Setup // <hr> // // To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify // the use of OpenMP on the command line: \code -fopenmp // GNU/Clang C++ compiler -openmp // Intel C++ compiler /openmp // Visual Studio \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of threads. // // As common for OpenMP, the number of threads can be specified either via an environment variable \code export OMP_NUM_THREADS=4 // Unix systems set OMP_NUM_THREADS=4 // Windows systems \endcode // or via an explicit call to the \c omp_set_num_threads() function: \code omp_set_num_threads( 4 ); \endcode // Alternatively, the number of threads can also be specified via the \c setNumThreads() function // provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of OpenMP, the function returns the maximum number of threads OpenMP will use // within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function. // // // \n \section openmp_configuration OpenMP Configuration // <hr> // // Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze // deems the parallel execution as counterproductive for the overall performance, the operation // is executed serially. One of the main reasons for not executing an operation in parallel is // the size of the operands. For instance, a vector addition is only executed in parallel if the // size of both vector operands exceeds a certain threshold. Otherwise, the performance could // seriously decrease due to the overhead caused by the thread setup. However, in order to be // able to adjust the \b Blaze library to a specific system, it is possible to configure these // thresholds manually. All shared memory thresholds are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique (see also \ref cpp_threads_parallelization and // \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum // performance for all possible situations and configurations. They merely provide a reasonable // standard for the current CPU generation. // // // \n \section openmp_first_touch First Touch Policy // <hr> // // So far the \b Blaze library does not (yet) automatically initialize dynamic memory according // to the first touch principle. 
Consider for instance the following vector triad example: \code using blaze::columnVector; const size_t N( 1000000UL ); blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N ); // Initialization of the vectors b, c, and d for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // Performing a vector triad a = b + c * d; \endcode // If this code, which is prototypical for many OpenMP applications that have not been optimized // for ccNUMA architectures, is run across several locality domains (LD), it will not scale // beyond the maximum performance achievable on a single LD if the working set does not fit into // the cache. This is because the initialization loop is executed by a single thread, writing to // \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will // be mapped into a single LD. // // As mentioned above, this problem can be solved by performing vector initialization in parallel: \code // ... // Initialization of the vectors b, c, and d #pragma omp parallel for for( size_t i=0UL; i<N; ++i ) { b[i] = rand<double>(); c[i] = rand<double>(); d[i] = rand<double>(); } // ... \endcode // This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for // instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in // order to achieve the maximum possible performance, it is imperative to initialize the memory // according to the later use of the data structures. // // // \n \section openmp_limitations Limitations of the OpenMP Parallelization // <hr> // // There are a few important limitations to the current \b Blaze OpenMP parallelization. The first // one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the // other one the OpenMP \c sections directive (see \ref openmp_sections). 
// // // \n \subsection openmp_parallel The Parallel Directive // // In OpenMP threads are explicitly spawned via an OpenMP parallel directive: \code // Serial region, executed by a single thread #pragma omp parallel { // Parallel region, executed by the specified number of threads } // Serial region, executed by a single thread \endcode // Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a // parallel directive is encountered. Therefore, from a performance point of view, it seems to be // beneficial to use a single OpenMP parallel directive for several operations: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; #pragma omp parallel { y1 = A * x; y2 = B * x; } \endcode // Unfortunately, this optimization approach is not allowed within the \b Blaze library. More // explicitly, it is not allowed to put an operation into a parallel region. The reason is that // the entire code contained within a parallel region is executed by all threads. Although this // appears to just comprise the contained computations, a computation (or more specifically the // assignment of an expression to a vector or matrix) can contain additional logic that must not // be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.). // Therefore it is not possible to manually start a parallel region for several operations, but // \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand // and the given operands. // // \n \subsection openmp_sections The Sections Directive // // OpenMP provides several work-sharing constructs to distribute work among threads. One of these // constructs is the \c sections directive: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... 
Resizing and initialization #pragma omp sections { #pragma omp section y1 = A * x; #pragma omp section y2 = B * x; } \endcode // In this example, two threads are used to compute two distinct matrix/vector multiplications // concurrently. Thereby each of the \c sections is executed by exactly one thread. // // Unfortunately \b Blaze does not support concurrent parallel computations and therefore this // approach does not work with any of the \b Blaze parallelization techniques. All techniques // (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization // and \ref boost_threads_parallelization) are optimized for the parallel computation of an // operation within a single thread of execution. This means that \b Blaze tries to use all // available threads to compute the result of a single operation as efficiently as possible. // Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations // and to let \b Blaze compute all operations within a \c sections directive in serial. This can // be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution) // or by selectively serializing all operations within a \c sections directive via the \c serial() // function: \code blaze::DynamicVector<double> x, y1, y2; blaze::DynamicMatrix<double> A, B; // ... Resizing and initialization #pragma omp sections { #pragma omp section y1 = serial( A * x ); #pragma omp section y2 = serial( B * x ); } \endcode // Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does // NOT work in this context! 
// // \n Previous: \ref shared_memory_parallelization &nbsp; &nbsp; Next: \ref cpp_threads_parallelization */ //************************************************************************************************* //**C++11 Thread Parallelization******************************************************************* /*!\page cpp_threads_parallelization C++11 Thread Parallelization // // \tableofcontents // // // In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1, // \b Blaze also provides a shared memory parallelization based on C++11 threads. // // // \n \section cpp_threads_setup C++11 Thread Setup // <hr> // // In order to enable the C++11 thread-based parallelization, first the according C++11-specific // compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument // has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the // compiler flags have to be extended by \code ... -std=c++11 -DBLAZE_USE_CPP_THREADS ... \endcode // This simple action will cause the \b Blaze library to automatically try to run all operations // in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11 // threads are enabled on the command line, the OpenMP-based parallelization has priority and // is preferred. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. 
// // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of C++11 threads, the function will return the previously specified number of // threads. // // // \n \section cpp_threads_configuration C++11 Thread Configuration // <hr> // // As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an // operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for // the overall performance, the operation is executed serially. One of the main reasons for not // executing an operation in parallel is the size of the operands. For instance, a vector addition // is only executed in parallel if the size of both vector operands exceeds a certain threshold. // Otherwise, the performance could seriously decrease due to the overhead caused by the thread // setup. However, in order to be able to adjust the \b Blaze library to a specific system, it // is possible to configure these thresholds manually. All thresholds are contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaptation for // the C++11 thread parallelization. 
// // // \n \section cpp_threads_known_issues Known Issues // <hr> // // There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang // if their destructor is executed after the \c main() function: // // http://connect.microsoft.com/VisualStudio/feedback/details/747145 // // Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug. // In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function, // which can be used to manually destroy all threads at the end of the \c main() function: \code int main() { // ... Using the C++11 thread parallelization of Blaze shutDownThreads(); } \endcode // Please note that this function may only be used at the end of the \c main() function. After // this function no further computation may be executed! Also note that this function has an // effect for Visual Studio compilers only and doesn't need to be used with any other compiler. // // \n Previous: \ref openmp_parallelization &nbsp; &nbsp; Next: \ref boost_threads_parallelization */ //************************************************************************************************* //**Boost Thread Parallelization******************************************************************* /*!\page boost_threads_parallelization Boost Thread Parallelization // // \tableofcontents // // // The third available shared memory parallelization provided with \b Blaze is based on Boost // threads. // // // \n \section boost_threads_setup Boost Thread Setup // <hr> // // In order to enable the Boost thread-based parallelization, two steps have to be taken: First, // the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_BOOST_THREADS ... \endcode // Second, the according Boost libraries have to be linked. 
These two simple actions will cause // the \b Blaze library to automatically try to run all operations in parallel with the specified // number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations // have priority, i.e. are preferred in case either is enabled in combination with the Boost // thread parallelization. // // The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS \code export BLAZE_NUM_THREADS=4 // Unix systems set BLAZE_NUM_THREADS=4 // Windows systems \endcode // or alternatively via the \c setNumThreads() function provided by the \b Blaze library: \code blaze::setNumThreads( 4 ); \endcode // Please note that the \b Blaze library does not limit the available number of threads. Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of Boost threads, the function will return the previously specified number of // threads. // // // \n \section boost_threads_configuration Boost Thread Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization). // All thresholds related to the Boost thread parallelization are also contained within the // configuration file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. 
They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the Boost thread parallelization. // // \n Previous: \ref cpp_threads_parallelization &nbsp; &nbsp; Next: \ref hpx_parallelization */ //************************************************************************************************* //**HPX Parallelization**************************************************************************** /*!\page hpx_parallelization HPX Parallelization // // \tableofcontents // // // The fourth and final shared memory parallelization provided with \b Blaze is based on // <a href="http://stellar.cct.lsu.edu/projects/hpx/">HPX</a>. // // // \n \section hpx_setup HPX Setup // <hr> // // In order to enable the HPX-based parallelization, the following steps have to be taken: First, // the \c BLAZE_USE_HPX_THREADS command line argument has to be explicitly specified during // compilation: \code ... -DBLAZE_USE_HPX_THREADS ... \endcode // Second, the HPX library and depending libraries such as Boost, hwloc, etc. have to be linked. // And third, the HPX threads have to be initialized by a call to the \c hpx::init() function (see // the <a href="http://stellar.cct.lsu.edu/files/hpx_0.9.0/docs/hpx/tutorial.html">HPX tutorial</a> // for further details). These three actions will cause the \b Blaze library to automatically try // to run all operations in parallel with the specified number of HPX threads. // // Note that the OpenMP-based, C++11 thread-based, and Boost thread-based parallelizations have // priority, i.e. are preferred in case either is enabled in combination with the HPX thread // parallelization. // // The number of threads used by the HPX backend has to be specified via the command line: \code ... --hpx:threads 4 ... \endcode // Please note that the \b Blaze library does not limit the available number of threads. 
Therefore // it is in YOUR responsibility to choose an appropriate number of threads. The best performance, // though, can be expected if the specified number of threads matches the available number of // cores. // // In order to query the number of threads used for the parallelization of operations, the // \c getNumThreads() function can be used: \code const size_t threads = blaze::getNumThreads(); \endcode // In the context of HPX threads, the function will return the actual number of threads used by // the HPX subsystem. // // // \n \section hpx_configuration HPX Configuration // <hr> // // As in case of the other shared memory parallelizations \b Blaze is not unconditionally running // an operation in parallel (see for instance \ref openmp_parallelization). Only in case a given // operation is large enough and exceeds a certain threshold the operation is executed in parallel. // All thresholds related to the HPX-based parallelization are contained within the configuration // file <tt><blaze/config/Thresholds.h></tt>. // // Please note that these thresholds are highly sensitive to the used system architecture and // the shared memory parallelization technique. Therefore the default values cannot guarantee // maximum performance for all possible situations and configurations. They merely provide a // reasonable standard for the current CPU generation. Also note that the provided defaults // have been determined using the OpenMP parallelization and require individual adaption for // the HPX-based parallelization. // // \n Previous: \ref boost_threads_parallelization &nbsp; &nbsp; Next: \ref serial_execution */ //************************************************************************************************* //**Serial Execution******************************************************************************* /*!\page serial_execution Serial Execution // // Sometimes it may be necessary to enforce the serial execution of specific operations. 
For this // purpose, the \b Blaze library offers three possible options: the serialization of a single // expression via the \c serial() function, the serialization of a block of expressions via the // \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution. // // // \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression // <hr> // // The first option is the serialization of a specific operation via the \c serial() function: \code blaze::DynamicMatrix<double> A, B, C; // ... Resizing and initialization C = serial( A + B ); \endcode // \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any // kind of dense or sparse vector or matrix expression. // // // \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions // <hr> // // The second option is the temporary and local enforcement of a serial execution via the // \c BLAZE_SERIAL_SECTION: \code using blaze::rowMajor; using blaze::columnVector; blaze::DynamicMatrix<double,rowMajor> A; blaze::DynamicVector<double,columnVector> b, c, d, x, y, z; // ... Resizing and initialization // Parallel execution // If possible and beneficial for performance the following operation is executed in parallel. x = A * b; // Serial execution // All operations executed within the serial section are guaranteed to be executed in // serial (even if a parallel execution would be possible and/or beneficial). BLAZE_SERIAL_SECTION { y = A * c; z = A * d; } // Parallel execution continued // ... \endcode // Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial. // Outside the scope of the serial section, all operations are run in parallel (if beneficial for // the performance). // // Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution. // The use of the serial section within several concurrent threads will result in undefined behavior! 
// // // \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution // <hr> // // The third option is the general deactivation of the parallel execution (even in case OpenMP is // enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION // switch in the <tt>./blaze/config/SMP.h</tt> configuration file: \code #define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1 \endcode // In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory // parallelization is deactivated altogether. // // \n Previous: \ref hpx_parallelization &nbsp; &nbsp; Next: \ref serialization */ //************************************************************************************************* //**Serialization********************************************************************************** /*!\page serialization Serialization // // Sometimes it is necessary to store vector and/or matrices on disk, for instance for storing // results or for sharing specific setups with other people. The \b Blaze math serialization // module provides the according functionality to create platform independent, portable, binary // representations of vectors and matrices that can be used to store the \b Blaze data structures // without loss of precision and to reliably transfer them from one machine to another. 
// // The following two pages explain how to serialize vectors and matrices: // // - \ref vector_serialization // - \ref matrix_serialization // // \n Previous: \ref serial_execution &nbsp; &nbsp; Next: \ref vector_serialization */ //************************************************************************************************* //**Vector Serialization*************************************************************************** /*!\page vector_serialization Vector Serialization // // The following example demonstrates the (de-)serialization of dense and sparse vectors: \code using blaze::columnVector; using blaze::rowVector; // Serialization of both vectors { blaze::StaticVector<double,5UL,rowVector> d; blaze::CompressedVector<int,columnVector> s; // ... Resizing and initialization // Creating an archive that writes into the file "vectors.blaze" blaze::Archive<std::ofstream> archive( "vectors.blaze" ); // Serialization of both vectors into the same archive. Note that d lies before s! archive << d << s; } // Reconstitution of both vectors { blaze::DynamicVector<double,rowVector> d1; blaze::DynamicVector<int,rowVector> d2; // Creating an archive that reads from the file "vectors.blaze" blaze::Archive<std::ifstream> archive( "vectors.blaze" ); // Reconstituting the former d vector into d1. Note that it is possible to reconstitute // the vector into a different kind of vector (StaticVector -> DynamicVector), but that // the type of elements has to be the same. archive >> d1; // Reconstituting the former s vector into d2. Note that it is even possible to reconstitute // a sparse vector as a dense vector (also the reverse is possible) and that a column vector // can be reconstituted as row vector (and vice versa). Note however that also in this case // the type of elements is the same! 
archive >> d2; } \endcode // The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can // also be used for vectors with vector or matrix element type: \code // Serialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // ... Resizing and initialization // Creating an archive that writes into the file "vector.blaze" blaze::Archive<std::ofstream> archive( "vector.blaze" ); // Serialization of the vector into the archive archive << vec; } // Deserialization { blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec; // Creating an archive that reads from the file "vector.blaze" blaze::Archive<std::ifstream> archive( "vector.blaze" ); // Reconstitution of the vector from the archive archive >> vec; } \endcode // As the examples demonstrate, the vector serialization offers enormous flexibility. However, // several actions result in errors: // // - vectors cannot be reconstituted as matrices (and vice versa) // - the element type of the serialized and reconstituted vector must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticVector, its size must match the size of the serialized vector // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref serialization &nbsp; &nbsp; Next: \ref matrix_serialization */ //************************************************************************************************* //**Matrix Serialization*************************************************************************** /*!\page matrix_serialization Matrix Serialization // // The serialization of matrices works in the same manner as the serialization of vectors. 
The // following example demonstrates the (de-)serialization of dense and sparse matrices: \code using blaze::rowMajor; using blaze::columnMajor; // Serialization of both matrices { blaze::StaticMatrix<double,3UL,5UL,rowMajor> D; blaze::CompressedMatrix<int,columnMajor> S; // ... Resizing and initialization // Creating an archive that writes into the file "matrices.blaze" blaze::Archive<std::ofstream> archive( "matrices.blaze" ); // Serialization of both matrices into the same archive. Note that D lies before S! archive << D << S; } // Reconstitution of both matrices { blaze::DynamicMatrix<double,rowMajor> D1; blaze::DynamicMatrix<int,rowMajor> D2; // Creating an archive that reads from the file "matrices.blaze" blaze::Archive<std::ifstream> archive( "matrices.blaze" ); // Reconstituting the former D matrix into D1. Note that it is possible to reconstitute // the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that // the type of elements has to be the same. archive >> D1; // Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute // a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major // matrix can be reconstituted as row-major matrix (and vice versa). Note however that also // in this case the type of elements is the same! archive >> D2; } \endcode // Note that also in case of matrices it is possible to (de-)serialize matrices with vector or // matrix elements: \code // Serialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // ... 
Resizing and initialization // Creating an archive that writes into the file "matrix.blaze" blaze::Archive<std::ofstream> archive( "matrix.blaze" ); // Serialization of the matrix into the archive archive << mat; } // Deserialization { blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat; // Creating an archive that reads from the file "matrix.blaze" blaze::Archive<std::ifstream> archive( "matrix.blaze" ); // Reconstitution of the matrix from the archive archive >> mat; } \endcode // Note that just as the vector serialization, the matrix serialization is restricted by a // few important rules: // // - matrices cannot be reconstituted as vectors (and vice versa) // - the element type of the serialized and reconstituted matrix must match, which means // that on the source and destination platform the general type (signed/unsigned integral // or floating point) and the size of the type must be exactly the same // - when reconstituting a \c StaticMatrix, the number of rows and columns must match those // of the serialized matrix // // In case an error is encountered during (de-)serialization, a \c std::runtime_error is // thrown. // // \n Previous: \ref vector_serialization &nbsp; &nbsp; Next: \ref customization \n */ //************************************************************************************************* //**Customization********************************************************************************** /*!\page customization Customization // // Although \b Blaze tries to work out of the box for every possible setting, still it may be // necessary to adapt the library to specific requirements. 
The following three pages explain // how to customize the \b Blaze library to your own needs: // // - \ref configuration_files // - \ref vector_and_matrix_customization // - \ref error_reporting_customization // // \n Previous: \ref matrix_serialization &nbsp; &nbsp; Next: \ref configuration_files */ //************************************************************************************************* //**Configuration Files**************************************************************************** /*!\page configuration_files Configuration Files // // \tableofcontents // // // Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose // \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory, // which provide ample opportunity to customize internal settings, behavior, and thresholds. // This chapter explains the most important of these configuration files. For a complete // overview of all customization opportunities, please go to the configuration files in the // <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation. // // // \n \section transpose_flag Default Vector Storage // <hr> // // The \b Blaze default is that all vectors are created as column vectors (if not specified // explicitly): \code blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector \endcode // The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default // vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library. 
// The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector \endcode // Alternatively the default transpose flag can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector. // // // \n \section storage_order Default Matrix Storage // <hr> // // Matrices are by default created as row-major matrices: \code blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix \endcode // The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default // matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order // for all matrices of the \b Blaze library can be specified. \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor \endcode // Alternatively the default storage order can be specified via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor #include <blaze/Blaze.h> \endcode // Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor. // // // \n \section blas_mode BLAS Mode // <hr> // // In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can // be configured to use a BLAS library. 
Via the following compilation switch in the configuration // file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled: \code #define BLAZE_BLAS_MODE 1 \endcode // In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL // switch should be activated to prevent \b Blaze from parallelizing on its own: \code #define BLAZE_BLAS_IS_PARALLEL 1 \endcode // Alternatively, both settings can be specified via command line or by defining the symbols // manually before including any \b Blaze header file: \code #define BLAZE_BLAS_MODE 1 #define BLAZE_BLAS_IS_PARALLEL 1 #include <blaze/Blaze.h> \endcode // In case no BLAS library is available, \b Blaze will still work and will not be reduced in // functionality, but performance may be limited. // // // \n \section cache_size Cache Size // <hr> // // The optimization of several \b Blaze compute kernels depends on the cache size of the target // architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal // speed the exact cache size of the system should be provided via the \c cacheSize value in the // <tt>./blaze/config/CacheSize.h</tt> configuration file: \code #define BLAZE_CACHE_SIZE 3145728UL \endcode // The cache size can also be specified via command line or by defining this symbol manually // before including any \b Blaze header file: \code #define BLAZE_CACHE_SIZE 3145728UL #include <blaze/Blaze.h> \endcode // \n \section vectorization Vectorization // <hr> // // In order to achieve maximum performance and to exploit the compute power of a target platform // the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or // AVX-512 intrinsics, depending on which instruction set is available. 
However, it is possible // to disable the vectorization entirely by the compile time switch in the configuration file // <tt>./blaze/config/Vectorization.h</tt>: \code #define BLAZE_USE_VECTORIZATION 1 \endcode // It is also possible to (de-)activate vectorization via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_VECTORIZATION 1 #include <blaze/Blaze.h> \endcode // In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed // to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is // disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for // the operations. Note that deactivating the vectorization may pose a severe performance // limitation for a large number of operations! // // // \n \section thresholds Thresholds // <hr> // // For many computations \b Blaze distinguishes between small and large vectors and matrices. // This separation is especially important for the parallel execution of computations, since // the use of several threads only pays off for sufficiently large vectors and matrices. // Additionally, it also enables \b Blaze to select kernels that are optimized for a specific // size. // // In order to distinguish between small and large data structures \b Blaze provides several // thresholds that can be adapted to the characteristics of the target platform. For instance, // the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom // \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels // for large multiplications. All thresholds, including the thresholds for the OpenMP- and // thread-based parallelization, are contained within the configuration file // <tt><blaze/config/Thresholds.h></tt>. 
// // // \n \section padding Padding // <hr> // // By default the \b Blaze library uses padding for all dense vectors and matrices in order to // achieve maximum performance in all operations. Due to padding, the proper alignment of data // elements can be guaranteed and the need for remainder loops is minimized. However, on the // downside padding introduces an additional memory overhead, which can be large depending on // the used data type. // // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_USE_PADDING 1 \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if // it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce // the performance of all dense vector and matrix operations! // // // \n \section streaming Streaming (Non-Temporal Stores) // <hr> // // For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide // a significant performance advantage of about 20%. However, this advantage is only in effect in // case the memory bandwidth of the target architecture is maxed out. If the target architecture's // memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance // instead of increasing it. 
// // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate streaming: \code #define BLAZE_USE_STREAMING 1 \endcode // Alternatively streaming can be (de-)activated via command line or by defining this symbol // manually before including any \b Blaze header file: \code #define BLAZE_USE_STREAMING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is // disabled. It is recommended to consult the target architecture's white papers to decide whether // streaming is beneficial or hurtful for performance. // // // \n Previous: \ref customization &nbsp; &nbsp; Next: \ref vector_and_matrix_customization \n */ //************************************************************************************************* //**Customization of Vectors and Matrices********************************************************** /*!\page vector_and_matrix_customization Customization of Vectors and Matrices // // \tableofcontents // // // \n \section custom_data_members Custom Data Members // <hr> // // So far the \b Blaze library does not provide a lot of flexibility to customize the data // members of existing \ref vector_types and \ref matrix_types. However, to some extent it is // possible to customize vectors and matrices by inheritance. The following example gives an // impression on how to create a simple variation of \ref matrix_types_custom_matrix, which // automatically takes care of acquiring and releasing custom memory. 
\code template< typename Type // Data type of the matrix , bool SO = defaultStorageOrder > // Storage order class MyCustomMatrix : public CustomMatrix< Type, unaligned, unpadded, SO > { public: explicit inline MyCustomMatrix( size_t m, size_t n ) : CustomMatrix<Type,unaligned,unpadded,SO>() , array_( new Type[m*n] ) { this->reset( array_.get(), m, n ); } private: std::unique_ptr<Type[]> array_; }; \endcode // Please note that this is a simplified example with the intent to show the general approach. // The number of constructors, the memory acquisition, and the kind of memory management can of // course be adapted to specific requirements. Also, please note that since none of the \b Blaze // vectors and matrices have virtual destructors polymorphic destruction cannot be used. // // // \n \section custom_operations Custom Operations // <hr> // // There are two approaches to extend \b Blaze with custom operations. First, the \c map() // functions provide the possibility to execute componentwise custom operations on vectors and // matrices. Second, it is possible to add customized free functions. // // \n \subsection custom_operations_map The map() Functions // // Via the unary and binary \c map() functions it is possible to execute componentwise custom // operations on vectors and matrices. The unary \c map() function can be used to apply a custom // operation on each single element of a dense vector or matrix or each non-zero element of a // sparse vector or matrix. For instance, the following example demonstrates a custom square // root computation on a dense matrix: \code blaze::DynamicMatrix<double> A, B; B = map( A, []( double d ) { return std::sqrt( d ); } ); \endcode // The binary \c map() function can be used to apply an operation pairwise to the elements of // two dense vectors or two dense matrices. 
The following example demonstrates the merging of // two matrices of double precision values into a matrix of double precision complex numbers: \code blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } }; blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } }; blaze::DynamicMatrix< complex<double> > cplx; // Creating the matrix // ( ( 2.1, 0.3) (-4.2, 1.4) ) // ( ( 1.0, 2.9) ( 0.6, -3.4) ) cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } ); \endcode // These examples demonstrate the most convenient way of defining a unary custom operation by // passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom // functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } }; B = map( A, Sqrt() ); \endcode // In order for the functor to work in a call to \c map() it must define a function call operator, // which accepts arguments of the type of the according vector or matrix elements. // // Although the operation is automatically parallelized depending on the size of the vector or // matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load() // function can be added to the functor, which handles the vectorized computation. 
Depending on // the data type this function is passed one of the following \b Blaze SIMD data types: // // <ul> // <li>SIMD data types for fundamental data types // <ul> // <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li> // <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li> // <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li> // <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li> // <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li> // <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li> // </ul> // </li> // <li>SIMD data types for complex data types // <ul> // <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li> // <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li> // <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li> // <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li> // <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li> // <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li> // <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision 
floating point data</li> // <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li> // </ul> // </li> // </ul> // // All SIMD types provide the \c value data member for a direct access to the underlying intrinsic // data element. In the following example, this intrinsic element is passed to the AVX function // \c _mm256_sqrt_pd(): \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } SIMDdouble load( const SIMDdouble& a ) const { return _mm256_sqrt_pd( a.value ); } }; \endcode // In this example, whenever vectorization is generally applicable, the \c load() function is // called instead of the function call operator for as long as the number of remaining elements // is larger-or-equal to the width of the packed SIMD type. In all other cases (which also // includes peel-off and remainder loops) the scalar operation is used. // // Please note that this example has two drawbacks: First, it will only compile in case the // intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the // availability of AVX is not taken into account. The first drawback can be alleviated by making // the \c load() function a function template. The second drawback can be dealt with by adding a // \c simdEnabled() function template to the functor: \code struct Sqrt { double operator()( double a ) const { return std::sqrt( a ); } template< typename T > T load( const T& a ) const { return _mm256_sqrt_pd( a.value ); } template< typename T > static constexpr bool simdEnabled() { #if defined(__AVX__) return true; #else return false; #endif } }; \endcode // The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether // or not vectorization is available for the given data type \c T. In case the function returns // \c true, the \c load() function is used for a vectorized evaluation, in case the function // returns \c false, \c load() is not called. 
// // Note that this is a simplified example that is only working when used for dense vectors and // matrices with double precision floating point elements. The following code shows the complete // implementation of the according functor that is used within the \b Blaze library. The \b Blaze // \c Sqrt functor is working for all data types that are providing a square root operation: \code namespace blaze { struct Sqrt { template< typename T > BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const { return sqrt( a ); } template< typename T > static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; } template< typename T > BLAZE_ALWAYS_INLINE auto load( const T& a ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T ); return sqrt( a ); } }; } // namespace blaze \endcode // The same approach can be taken for binary custom operations. The following code demonstrates // the \c Min functor of the \b Blaze library, which is working for all data types that provide // a \c min() operation: \code struct Min { explicit inline Min() {} template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const { return min( a, b ); } template< typename T1, typename T2 > static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; } template< typename T1, typename T2 > BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const { BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 ); BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 ); return min( a, b ); } }; \endcode // For more information on the available \b Blaze SIMD data types and functions, please see the // SIMD module in the complete \b Blaze documentation. // // \n \subsection custom_operations_free_functions Free Functions // // In order to extend \b Blaze with new functionality it is possible to add free functions. Free // functions can be used either as wrappers around calls to the map() function or to implement // general, non-componentwise operations. 
The following two examples will demonstrate both ideas. // // The first example shows the \c setToZero() function, which resets a sparse matrix to zero // without affecting the sparsity pattern. It is implemented as a convenience wrapper around // the map() function: \code template< typename MT // Type of the sparse matrix , bool SO > // Storage order void setToZero( blaze::SparseMatrix<MT,SO>& mat ) { (~mat) = blaze::map( ~mat, []( int ){ return 0; } ); } \endcode // The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and // provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the // <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a> // it also enables a conversion back to the actual type. This downcast is performed via the tilde // operator (i.e. \c operator~()). The template parameter \c SO represents the storage order // (blaze::rowMajor or blaze::columnMajor) of the matrix. // // The second example shows the \c countZeros() function, which counts the number of values, which // are exactly zero, in a dense, row-major matrix: \code template< typename MT > size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat ) { const size_t M( (~mat).rows() ); const size_t N( (~mat).columns() ); size_t count( 0UL ); for( size_t i=0UL; i<M; ++i ) { for( size_t j=0UL; j<N; ++j ) { if( blaze::isDefault<strict>( (~mat)(i,j) ) ) ++count; } } return count; } \endcode // The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again, // it is possible to perform the conversion to the actual type via the tilde operator. 
// // The following two listings show the declarations of all vector and matrix base classes, which // can be used for custom free functions: \code template< typename VT // Concrete type of the dense or sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class Vector; template< typename VT // Concrete type of the dense vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class DenseVector; template< typename VT // Concrete type of the sparse vector , bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector) class SparseVector; \endcode \code template< typename MT // Concrete type of the dense or sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class Matrix; template< typename MT // Concrete type of the dense matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class DenseMatrix; template< typename MT // Concrete type of the sparse matrix , bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor) class SparseMatrix; \endcode // \n \section custom_data_types Custom Data Types // <hr> // // The \b Blaze library tries hard to make the use of custom data types as convenient, easy and // intuitive as possible. However, unfortunately it is not possible to meet the requirements of // all possible data types. Thus it might be necessary to provide \b Blaze with some additional // information about the data type. The following sections give an overview of the necessary steps // to enable the use of the hypothetical custom data type \c custom::double_t for vector and // matrix operations. For example: \code blaze::DynamicVector<custom::double_t> a, b, c; // ... Resizing and initialization c = a + b; \endcode // The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+() // for additions, \c operator-() for subtractions, \c operator*() for multiplications and // \c operator/() for divisions. 
If any of these functions is missing it is necessary to implement // the operator to perform the according operation. For this example we assume that the custom // data type provides the four following functions instead of operators: \code namespace custom { double_t add ( const double_t& a, const double_t b ); double_t sub ( const double_t& a, const double_t b ); double_t mult( const double_t& a, const double_t b ); double_t div ( const double_t& a, const double_t b ); } // namespace custom \endcode // The following implementations will satisfy the requirements of the \b Blaze library: \code inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b ) { return add( a, b ); } inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b ) { return sub( a, b ); } inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b ) { return mult( a, b ); } inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b ) { return div( a, b ); } \endcode // \b Blaze will use all the information provided with these functions (for instance the return // type) to properly handle the operations. 
In the rare case that the return type cannot be // automatically determined from the operator it might be additionally necessary to provide a // specialization of the following four \b Blaze class templates: \code namespace blaze { template<> struct AddTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct SubTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct MultTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; template<> struct DivTrait<custom::double_t,custom::double_t> { using Type = custom::double_t; }; } // namespace blaze \endcode // The same steps are necessary if several custom data types need to be combined (as for instance // \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to // be taken into account: \code custom::double_t operator+( const custom::double_t& a, const custom::float_t& b ); custom::double_t operator+( const custom::float_t& a, const custom::double_t& b ); // ... \endcode // Please note that only built-in data types apply for vectorization and thus custom data types // cannot achieve maximum performance! // // // \n Previous: \ref configuration_files &nbsp; &nbsp; Next: \ref custom_operations \n */ //************************************************************************************************* //**Customization of the Error Reporting Mechanism************************************************* /*!\page error_reporting_customization Customization of the Error Reporting Mechanism // // \tableofcontents // // // \n \section error_reporting_background Background // <hr> // // The default way of \b Blaze to report errors of any kind is to throw a standard exception. // However, although in general this approach works well, in certain environments and under // special circumstances exceptions may not be the mechanism of choice and a different error // reporting mechanism may be desirable. 
For this reason, \b Blaze provides several macros, // which enable the customization of the error reporting mechanism. Via these macros it is // possible to replace the standard exceptions by some other exception type or a completely // different approach to report errors. // // // \n \section error_reporting_general_customization Customization of the Reporting Mechanism // <hr> // // In some cases it might be necessary to adapt the entire error reporting mechanism and to // replace it by some other means to signal failure. The primary macro for this purpose is the // \c BLAZE_THROW macro: \code #define BLAZE_THROW( EXCEPTION ) \ throw EXCEPTION \endcode // This macro represents the default mechanism of the \b Blaze library to report errors of any // kind. In order to customize the error reporting mechanism all that needs to be done is to // define the macro prior to including any \b Blaze header file. This will cause the \b Blaze // specific mechanism to be overridden. The following example demonstrates this by replacing // exceptions by a call to a \c log() function and a direct call to abort: \code #define BLAZE_THROW( EXCEPTION ) \ log( "..." ); \ abort() #include <blaze/Blaze.h> \endcode // Doing this will trigger a call to \c log() and an abort instead of throwing an exception // whenever an error (such as an invalid argument) is detected. // // \note It is possible to execute several statements instead of executing a single statement to // throw an exception. Also note that it is recommended to define the macro such that a subsequent // semicolon is required! // // \warning This macro is provided with the intention to assist in adapting \b Blaze to special // conditions and environments. However, the customization of the error reporting mechanism via // this macro can have a significant effect on the library. Thus be advised to use the macro // with due care! 
// // // \n \section error_reporting_exception_customization Customization of the Type of Exceptions // <hr> // // In addition to the customization of the entire error reporting mechanism it is also possible // to customize the type of exceptions being thrown. This can be achieved by customizing any // number of the following macros: \code #define BLAZE_THROW_BAD_ALLOC \ BLAZE_THROW( std::bad_alloc() ) #define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \ BLAZE_THROW( std::logic_error( MESSAGE ) ) #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( std::invalid_argument( MESSAGE ) ) #define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \ BLAZE_THROW( std::length_error( MESSAGE ) ) #define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \ BLAZE_THROW( std::out_of_range( MESSAGE ) ) #define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \ BLAZE_THROW( std::runtime_error( MESSAGE ) ) \endcode // In order to customize the type of exception the according macro has to be defined prior to // including any \b Blaze header file. This will override the \b Blaze default behavior. The // following example demonstrates this by replacing \c std::invalid_argument by a custom // exception type: \code class InvalidArgument { public: InvalidArgument(); explicit InvalidArgument( const std::string& message ); // ... }; #define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \ BLAZE_THROW( InvalidArgument( MESSAGE ) ) #include <blaze/Blaze.h> \endcode // By manually defining the macro, an \c InvalidArgument exception is thrown instead of a // \c std::invalid_argument exception. Note that it is recommended to define the macro such // that a subsequent semicolon is required! // // \warning These macros are provided with the intention to assist in adapting \b Blaze to // special conditions and environments. However, the customization of the type of an exception // via this macro may have an effect on the library. Thus be advised to use the macro with due // care! 
// // // \n \section error_reporting_special_errors Customization of Special Errors // <hr> // // Last but not least it is possible to customize the error reporting for special kinds of errors. // This can be achieved by customizing any number of the following macros: \code #define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) #define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \ BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \endcode // As explained in the previous sections, in order to customize the handling of special errors // the according macro has to be defined prior to including any \b Blaze header file. This will // override the \b Blaze default behavior. // // // \n Previous: \ref vector_and_matrix_customization &nbsp; &nbsp; Next: \ref blas_functions \n */ //************************************************************************************************* //**BLAS Functions********************************************************************************* /*!\page blas_functions BLAS Functions // // \tableofcontents // // // For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices // \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements // several convenient C++ wrapper functions for several BLAS functions. The following sections // give a complete overview of all available BLAS level 1, 2 and 3 functions. 
// // // \n \section blas_level_1 BLAS Level 1 // <hr> // // \subsection blas_level_1_dotu Dot Product (dotu) // // The following wrapper functions provide a generic interface for the BLAS functions for the // dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()): \code namespace blaze { float dotu( int n, const float* x, int incX, const float* y, int incY ); double dotu( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotu( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotu( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc) // // The following wrapper functions provide a generic interface for the BLAS functions for the // complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(), // and \c zdotc_sub()): \code namespace blaze { float dotc( int n, const float* x, int incX, const float* y, int incY ); double dotc( int n, const double* x, int incX, const double* y, int incY ); complex<float> dotc( int n, const complex<float>* x, int incX, const complex<float>* y, int incY ); complex<double> dotc( int n, const complex<double>* x, int incX, const complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2 > ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y ); } // namespace blaze \endcode // \subsection blas_level_1_axpy Axpy Product (axpy) // // The following wrapper functions provide a generic interface for the BLAS functions for the // axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()): \code namespace blaze { void axpy( int n, float alpha, const 
float* x, int incX, float* y, int incY ); void axpy( int n, double alpha, const double* x, int incX, double* y, int incY ); void axpy( int n, complex<float> alpha, const complex<float>* x, int incX, complex<float>* y, int incY ); void axpy( int n, complex<double> alpha, const complex<double>* x, int incX, complex<double>* y, int incY ); template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST > void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha ); } // namespace blaze \endcode // \n \section blas_level_2 BLAS Level 2 // <hr> // // \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()): \code namespace blaze { void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha, const float* A, int lda, const float* x, int incX, float beta, float* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha, const double* A, int lda, const double* x, int incX, double beta, double* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* x, int incX, complex<float> beta, complex<float>* y, int incY ); void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* x, int incX, complex<double> beta, complex<double>* y, int incY ); template< typename VT1, typename MT1, bool SO, typename VT2, typename ST > void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A, const DenseVector<VT2,false>& x, ST alpha, ST beta ); template< typename VT1, typename VT2, typename MT1, bool SO, typename ST > void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x, const 
DenseMatrix<MT1,SO>& A, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(), // and \c ztrmv()): \code namespace blaze { void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const float* A, int lda, float* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const double* A, int lda, double* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); template< typename VT, typename MT, bool SO > void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo ); } // namespace blaze \endcode // \n \section blas_level_3 BLAS Level 3 // <hr> // // \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()): \code namespace blaze { void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, float alpha, const float* A, int lda, const float* B, int ldb, float beta, float* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, double alpha, const double* A, int lda, const double* B, int ldb, 
double beta, double* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda, const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc ); void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB, int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda, const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST > void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta ); } // namespace blaze \endcode // \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm) // // The following wrapper functions provide a generic interface for the BLAS functions for the // matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and // \c ztrmm()): \code namespace blaze { void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A, CBLAS_SIDE side, 
CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n \subsection blas_level_3_trsm Triangular System Solver (trsm) // // The following wrapper functions provide a generic interface for the BLAS functions for solving // a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()): \code namespace blaze { void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, float alpha, const float* A, int lda, float* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, double alpha, const double* A, int lda, double* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A, int lda, complex<float>* B, int ldb ); void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A, int lda, complex<double>* B, int ldb ); template< typename MT, bool SO, typename VT, bool TF, typename ST > void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST > void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha ); } // namespace blaze \endcode // \n Previous: \ref error_reporting_customization &nbsp; &nbsp; Next: \ref lapack_functions \n */ //************************************************************************************************* //**LAPACK Functions******************************************************************************* /*!\page lapack_functions LAPACK Functions // // \tableofcontents // // // \n \section lapack_introction Introduction // <hr> // // The \b Blaze library makes extensive use of the LAPACK 
functionality for various compute tasks // (including the decomposition, inversion and the computation of the determinant of dense matrices). // For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required // LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper // functions. For more details on the individual LAPACK functions see the \b Blaze function // documentation or the LAPACK online documentation browser: // // http://www.netlib.org/lapack/explore-html/ // // Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They // provide the parameters of the original LAPACK functions and thus provide maximum flexibility: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... Initializing the matrix const int m ( numeric_cast<int>( A.rows() ) ); // == N const int n ( numeric_cast<int>( A.columns() ) ); // == N const int lda ( numeric_cast<int>( A.spacing() ) ); // >= N const int lwork( n*lda ); const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required const std::unique_ptr<double[]> work( new double[N] ); // No initialization required int info( 0 ); getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info' getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info' \endcode // Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These // wrappers provide a maximum of convenience: \code constexpr size_t N( 100UL ); blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N ); // ... 
Initializing the matrix const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required getrf( A, ipiv.get() ); // Cannot fail getri( A, ipiv.get() ); // Reports failure via exception \endcode // \note All functions only work for general, non-adapted matrices with \c float, \c double, // \c complex<float>, or \c complex<double> element type. The attempt to call the function with // adaptors or matrices of any other element type results in a compile time error! // // \note All functions can only be used if a fitting LAPACK library is available and linked to // the final executable. Otherwise a call to this function will result in a linker error. // // \note For performance reasons all functions do only provide the basic exception safety guarantee, // i.e. in case an exception is thrown the given matrix may already have been modified. // // // \n \section lapack_decomposition Matrix Decomposition // <hr> // // The following functions decompose/factorize the given dense matrix. Based on this decomposition // the matrix can be inverted or used to solve a linear system of equations. // // // \n \subsection lapack_lu_decomposition LU Decomposition // // The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(), // \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix: \code namespace blaze { void getrf( int m, int n, float* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, double* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info ); void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info ); template< typename MT, bool SO > void getrf( DenseMatrix<MT,SO>& A, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = P \cdot L \cdot U, \f]\n // where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper // triangular matrix. 
The resulting decomposition is stored within \a A: In case of a column-major // matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit // diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is // transposed. // // \note The LU decomposition will never fail, even for singular matrices. However, in case of a // singular matrix the resulting decomposition cannot be used for a matrix inversion or solving // a linear system of equations. // // // \n \subsection lapack_ldlt_decomposition LDLT Decomposition // // The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(), // \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given // symmetric indefinite matrix: \code namespace blaze { void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info ); void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or } A = L D L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. 
// // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. // // // \n \subsection lapack_ldlh_decomposition LDLH Decomposition // // The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(), // which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix: \code namespace blaze { void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info ); void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or } A = L D L^{H} \texttt{ (if uplo = 'L'), } \f] // where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices, // and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting // decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in // the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to // \c 'U' the result is stored in the upper part and the lower part remains untouched. // // \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in // case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or // solving a linear system of equations. 
// // // \n \subsection lapack_llh_decomposition Cholesky Decomposition // // The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(), // \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given // positive definite matrix: \code namespace blaze { void potrf( char uplo, int n, float* A, int lda, int* info ); void potrf( char uplo, int n, double* A, int lda, int* info ); void potrf( char uplo, int n, complex<float>* A, int lda, int* info ); void potrf( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potrf( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The decomposition has the form \f[ A = U^{T} U \texttt{ (if uplo = 'U'), or } A = L L^{T} \texttt{ (if uplo = 'L'), } \f] // where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky // decomposition fails if the given matrix \a A is not a positive definite matrix. In this case // a \a std::invalid_argument exception is thrown. 
// // // \n \subsection lapack_qr_decomposition QR Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(), // \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix: \code namespace blaze { void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot R, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the // min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n); // the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as // a product of min(\a m,\a n) elementary reflectors. 
// // The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(), // \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition: \code namespace blaze { void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(), // \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from // a QR decomposition: \code namespace blaze { void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormqr( DenseMatrix<MT1,SO1>& C, const 
DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_rq_decomposition RQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(), // \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix: \code namespace blaze { void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = R \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and // <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>, // and \c tau in \c tau(i). 
Thus in case \a m <= \a n, the upper triangle of the subarray // <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case // \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n // upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau // represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(), // \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition: \code namespace blaze { void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(), // \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from // an RQ decomposition: \code namespace blaze { void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, 
int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_ql_decomposition QL Decomposition // // The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(), // \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix: \code namespace blaze { void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = Q \cdot L, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and // <tt>v(m-k+i) = 1</tt>. 
<tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>, // and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray // A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n, // the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower // trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent // the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(), // \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition: \code namespace blaze { void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(), // \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from // a QL decomposition: \code namespace blaze { void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int 
ldc, double* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \subsection lapack_lq_decomposition LQ Decomposition // // The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(), // \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix: \code namespace blaze { void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info ); void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info ); void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info ); void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau ); } // namespace blaze \endcode // The decomposition has the form \f[ A = L \cdot Q, \f] // where the \c Q is represented as a product of elementary reflectors \f[ Q = H(k) . . . 
H(2) H(1) \texttt{, with k = min(m,n).} \f] // Each H(i) has the form \f[ H(i) = I - tau \cdot v \cdot v^T, \f] // where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and // <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau // in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the // \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n); // the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q // as a product of min(\a m,\a n) elementary reflectors. // // The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(), // \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition: \code namespace blaze { void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info ); void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info ); void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); template< typename MT, bool SO > void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau ); } // namespace blaze \endcode // The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(), // \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from // an LQ decomposition: \code namespace blaze { void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info ); void 
ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info ); void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau ); template< typename MT1, bool SO, typename MT2 > void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau ); } // namespace blaze \endcode // \n \section lapack_inversion Matrix Inversion // <hr> // // Given a matrix that has already been decomposed, the following functions can be used to invert // the matrix in-place. // // // \n \subsection lapack_lu_inversion LU-based Inversion // // The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(), // \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by // an \ref lapack_lu_decomposition : \code namespace blaze { void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info ); void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info ); void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info ); void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO > void getri( DenseMatrix<MT,SO>& A, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... 
the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_inversion LDLT-based Inversion // // The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(), // \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been // decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info ); void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info ); void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_inversion LDLH-based Inversion // // The following functions provide an interface for the LAPACK functions \c chetri() and // \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by // an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info ); void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info ); template< typename MT, bool SO > void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the third function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_llh_inversion Cholesky-based Inversion // // The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(), // \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been // decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potri( char uplo, int n, float* A, int lda, int* info ); void potri( char uplo, int n, double* A, int lda, int* info ); void potri( char uplo, int n, complex<float>* A, int lda, int* info ); void potri( char uplo, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void potri( DenseMatrix<MT,SO>& A, char uplo ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given matrix is singular and not invertible. 
// // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(), // \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place: \code namespace blaze { void trtri( char uplo, char diag, int n, float* A, int lda, int* info ); void trtri( char uplo, char diag, int n, double* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info ); void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info ); template< typename MT, bool SO > void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag ); } // namespace blaze \endcode // The functions fail if ... // // - ... the given matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the given matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the fifth function throws a // \a std::invalid_argument exception in case of an error. // // // \n \section lapack_substitution Substitution // <hr> // // Given a matrix that has already been decomposed the following functions can be used to perform // the forward/backward substitution step to compute the solution to a system of linear equations. 
// Note that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. // // // \n \subsection lapack_lu_substitution LU-based Substitution // // The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(), // \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has // already been decomposed by an \ref lapack_lu_decomposition : \code namespace blaze { void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv ); } // namespace 
blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlt_substitution LDLT-based Substitution // // The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(), // \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite // matrix that has already been decomposed by an \ref lapack_ldlt_decomposition : \code namespace blaze { void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... 
the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. // // // \n \subsection lapack_ldlh_substitution LDLH-based Substitution // // The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(), // which perform the substitution step for an Hermitian indefinite matrix that has already been // decomposed by an \ref lapack_ldlh_decomposition : \code namespace blaze { void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info ); void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_substitution Cholesky-based Substitution // // The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(), // \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix // that has already been decomposed by an \ref lapack_llh_decomposition : \code namespace blaze { void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(), // \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix: \code namespace blaze { void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s) // of the linear system of equations. The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'; // - ... the sizes of the two given matrices do not match. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \section lapack_linear_system_solver Linear System Solver // <hr> // // The following functions represent compound functions that perform both the decomposition step // as well as the substitution step to compute the solution to a system of linear equations. Note // that depending on the storage order of the system matrix and the given right-hand side the // functions solve different equation systems: // // Single right-hand side: // - \f$ A *x=b \f$ if \a A is column-major // - \f$ A^T*x=b \f$ if \a A is row-major // // Multiple right-hand sides: // - \f$ A *X =B \f$ if both \a A and \a B are column-major // - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major // - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major // - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major // // In this context the general system matrix \a A is a n-by-n matrix that has already been // factorized by the according decomposition function, \a x and \a b are n-dimensional vectors // and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices. 
// // // \subsection lapack_lu_linear_system_solver LU-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(), // \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according // \ref lapack_lu_substitution : \code namespace blaze { void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info ); void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info ); void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_lu_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(), // \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according // \ref lapack_ldlt_substitution : \code namespace blaze { void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlt_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c chesv() and \c zhesv() // (LAPACK provides \c hesv() only for complex element types), which combine an // \ref lapack_ldlh_decomposition and the according \ref lapack_ldlh_substitution : \code namespace blaze { void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info ); void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_ldlh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first two functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver // // The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(), // \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according // \ref lapack_llh_substitution : \code namespace blaze { void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info ); void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info ); template< typename MT, bool SO, typename VT, bool TF > void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo ); template< typename MT1, bool SO1, typename MT2, bool SO2 > void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b or the matrix \a B contain the // solution(s) of the linear system of equations and \a A has been decomposed by means of an // \ref lapack_llh_decomposition. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the sizes of the two given matrices do not match; // - ... the given system matrix is singular and not invertible. // // The first four functions report failure via the \c info argument, the last two functions throw // a \a std::invalid_argument exception in case of an error. 
// // // \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices // // The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(), // \c ctrsv(), and \c ztrsv(): \code namespace blaze { void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX ); void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX ); template< typename MT, bool SO, typename VT, bool TF > void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag ); } // namespace blaze \endcode // If the function exits successfully, the vector \a b contains the solution of the linear // system of equations. // // The functions fail if ... // // - ... the given system matrix is not a square matrix; // - ... the given \a uplo argument is neither 'L' nor 'U'; // - ... the given \a trans argument is neither 'N' nor 'T' nor 'C'; // - ... the given \a diag argument is neither 'U' nor 'N'. // // The last function throws a \a std::invalid_argument exception in case of an error. Note that // none of the functions performs any test for singularity or near-singularity. Such tests // must be performed prior to calling this function! 
// // // \n \section lapack_eigenvalues Eigenvalues/Eigenvectors // // \subsection lapack_eigenvalues_general General Matrices // // The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(), // \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of // the given general matrix: \code namespace blaze { void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int lwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int ldvr, complex<float>* work, int lwork, float* rwork, int* info ); void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, int ldvr, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR ); template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 > void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR ); } // namespace blaze \endcode // The complex eigenvalues of the given matrix \a A are returned in the given vector \a w. 
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs // of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part // first. // // If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR // in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major // matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies \f[ A * v[j] = lambda[j] * v[j], \f] // where \f$lambda[j]\f$ is its eigenvalue. // // If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL // in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major // matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies \f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f] // where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$. // // \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The // functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match; // - ... the eigenvalue computation fails. // // The first four functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices // // The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(), // which compute the eigenvalues and eigenvectors of the given symmetric matrix: \code namespace blaze { void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* info ); void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* iwork, int liwork, int* info ); void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* iwork, int liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... 
the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix: \code namespace blaze { void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, int ldz, float* work, int lwork, int* iwork, int* ifail, int* info ); void syevx( char jobz, char range, char uplo, int n, double* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. 
The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices // // The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(), // which compute the eigenvalues and eigenvectors of the given Hermitian matrix: \code namespace blaze { void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info ); void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a // divide-and-conquer strategy for the computation of the left and right eigenvectors: \code namespace blaze { void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int* liwork, int* info ); void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int* liwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo ); } // namespace blaze \endcode // The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized // to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left // eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right // eigenvectors are returned in the columns of \a A. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... 
the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given \a jobz argument is neither \c 'V' nor \c 'N'; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last function throws an // exception in case of an error. // // Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it // is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix: \code namespace blaze { void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, int* iwork, int* ifail, int* info ); void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* rwork, int* iwork, int* ifail, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo ); template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST > size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp ); } // namespace blaze \endcode // The number of eigenvalues to be computed is specified by the lower bound \c low and the upper // bound \c upp, which either form an integral or a floating point 
range. // // In case \a low and \a upp are of integral type, the function computes all eigenvalues in the // index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending // order in the given vector \a w, which is either resized (if possible) or expected to be a // \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is // row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is // resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num // column-major matrix. // // In case \a low and \a upp are of floating point type, the function computes all eigenvalues // in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in // ascending order in the given vector \a w, which is either resized (if possible) or expected // to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case // \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. // \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix. // // The functions fail if ... // // - ... the given matrix \a A is not a square matrix; // - ... the given vector \a w is a fixed size vector and the size doesn't match; // - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match; // - ... the given \a uplo argument is neither \c 'L' nor \c 'U'; // - ... the eigenvalue computation fails. // // The first two functions report failure via the \c info argument, the last four functions throw // an exception in case of an error. 
// // // \n \section lapack_singular_values Singular Values/Singular Vectors // // The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(), // \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given // general matrix: \code namespace blaze { void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* info ); void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv ); } // namespace blaze \endcode // Alternatively, the following functions can be used, which provide an interface to the LAPACK // functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). 
In contrast to the \c gesvd() // functions they compute the singular value decomposition (SVD) of the given general matrix by // applying a divide-and-conquer strategy for the computation of the left and right singular // vectors: \code namespace blaze { void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz ); } // namespace blaze \endcode // The resulting decomposition has the form \f[ A = U \cdot S \cdot V, \f] // where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal // elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal // matrix. 
The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n) // columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively. // // The resulting min(\a m,\a n) real and non-negative singular values are returned in descending // order in the vector \a s, which is resized to the correct size (if possible and necessary). // // Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(), // \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or // vectors: \code namespace blaze { void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info ); void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info ); template< typename MT, bool SO, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s ); template< typename MT, bool SO, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF > size_t gesvdx( DenseMatrix<MT1,SO>& A, 
DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V ); template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V ); template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST > size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp ); } // namespace blaze \endcode // The number of singular values to be computed is specified by the lower bound \a low and the // upper bound \a upp, which either form an integral or a floating point range. // // In case \a low and \a upp form are of integral type, the function computes all singular values // in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored // in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V, // which is either resized (if possible) or expected to be a \a num-by-\a n matrix. 
// // In case \a low and \a upp are of floating point type, the function computes all singular values // in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values // are stored in descending order in the given vector \a s, which is either resized (if possible) // or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are // stored in the given matrix \a U, which is either resized (if possible) or expected to be a // \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given // matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n // matrix. // // The functions fail if ... // // - ... the given matrix \a U is a fixed size matrix and the dimensions don't match; // - ... the given vector \a s is a fixed size vector and the size doesn't match; // - ... the given matrix \a V is a fixed size matrix and the dimensions don't match; // - ... the given scalar values don't form a proper range; // - ... the singular value decomposition fails. // // The first four functions report failure via the \c info argument, the remaining functions throw // an exception in case of an error. // // // \n Previous: \ref blas_functions &nbsp; &nbsp; Next: \ref block_vectors_and_matrices \n */ //************************************************************************************************* //**Block Vectors and Matrices********************************************************************* /*!\page block_vectors_and_matrices Block Vectors and Matrices // // \tableofcontents // // // \n \section block_vectors_and_matrices_general General Concepts // <hr> // // In addition to fundamental element types, the \b Blaze library supports vectors and matrices // with non-fundamental element type. 
For instance, it is possible to define block matrices by // using a matrix type as the element type: \code using blaze::DynamicMatrix; using blaze::DynamicVector; using blaze::rowMajor; using blaze::columnVector; DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A; DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y; // ... Resizing and initialization y = A * x; \endcode // The matrix/vector multiplication in this example runs fully parallel and uses vectorization // for every inner matrix/vector multiplication and vector addition. // // // \n \section block_vectors_and_matrices_pitfalls Pitfalls // <hr> // // The only thing to keep in mind when using non-fundamental element types is that all operations // between the elements have to be well defined. More specifically, the size of vector and matrix // elements has to match. The attempt to combine two non-matching elements results in either a // compilation error (in case of statically sized elements) or an exception (for dynamically sized // elements): \code DynamicVector< StaticVector<int,2UL> > a; DynamicVector< StaticVector<int,3UL> > b; DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match \endcode // Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector, // \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized // accordingly upfront. 
// // // \n \section block_vectors_and_matrices_examples Examples // <hr> // // The first example demonstrates the multiplication between a statically sized block matrix // and a block vector: \code using namespace blaze; // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) ) // ( ) * ( ) = ( ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) // ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) ) using M2x2 = StaticMatrix<int,2UL,2UL,rowMajor>; using V2 = StaticVector<int,2UL,columnVector>; DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) }, { M2x2(3), M2x2(4) } }; DynamicVector<V2,columnVector> x{ V2(1), V2(2) }; DynamicVector<V2,columnVector> y( A * x ); \endcode // The second example shows the multiplication between a compressed block matrix with blocks of // varying size and a compressed block vector: \code using namespace blaze; // ( ( 1 -2 3 ) ( 5 -1 ) ) ( ( 1 ) ) ( ( -3 ) ) // ( ( 4 1 0 ) ( 1 2 ) ) ( ( 0 ) ) ( ( 7 ) ) // ( ( 0 2 4 ) ( 3 1 ) ) ( ( 1 ) ) ( ( 3 ) ) // ( ) ( ) ( ) // ( ( 1 ) ) * ( ( 2 ) ) = ( ( 2 ) ) // ( ) ( ) ( ) // ( ( 0 -1 1 ) ( 1 0 ) ) ( ( -1 ) ) ( ( 0 ) ) // ( ( 2 -1 2 ) ( 0 1 ) ) ( ( 2 ) ) ( ( 6 ) ) using M3x3 = HybridMatrix<int,3UL,3UL,rowMajor>; using V3 = HybridVector<int,3UL,columnVector>; CompressedMatrix<M3x3,rowMajor> A( 3UL, 3UL, 5UL ); A(0,0) = M3x3{ { 1, -2, 3 }, { 4, 1, 0 }, { 0, 2, 4 } }; A(0,2) = M3x3{ { 5, -1 }, { 1, 2 }, { 3, 1 } }; A(1,1) = M3x3{ { 1 } }; A(2,0) = M3x3{ { 0, -1, 1 }, { 2, -1, 2 } }; A(2,2) = M3x3{ { 1, 0 }, { 0, 1 } }; CompressedVector<V3,columnVector> x( 3UL, 3UL ); x[0] = V3{ 1, 0, 1 }; x[1] = V3{ 2 }; x[2] = V3{ -1, 2 }; CompressedVector<V3,columnVector> y( A * x ); \endcode // \n Previous: \ref lapack_functions &nbsp; &nbsp; Next: \ref intra_statement_optimization \n */ //************************************************************************************************* //**Intra-Statement Optimization******************************************************************* /*!\page 
intra_statement_optimization Intra-Statement Optimization // // One of the prime features of the \b Blaze library is the automatic intra-statement optimization. // In order to optimize the overall performance of every single statement \b Blaze attempts to // rearrange the operands based on their types. For instance, the following addition of dense and // sparse vectors \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1 + d2; \endcode // is automatically rearranged and evaluated as \code // ... d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged \endcode // This order of operands is highly favorable for the overall performance since the addition of // the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized // fashion. // // This intra-statement optimization can have a tremendous effect on the performance of a statement. // Consider for instance the following computation: \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = A * B * x; \endcode // Since multiplications are evaluated from left to right, this statement would result in a // matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the // right subexpression is evaluated first, the performance can be dramatically improved since the // matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication. // The \b Blaze library exploits this by automatically restructuring the expression such that the // right multiplication is evaluated first: \code // ... y = A * ( B * x ); \endcode // Note however that although this intra-statement optimization may result in a measurable or // even significant performance improvement, this behavior may be undesirable for several reasons, // for instance because of numerical stability. 
Therefore, in case the order of evaluation matters, // the best solution is to be explicit and to separate a statement into several statements: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ... d3 += d2; // ... and afterwards add the second dense vector \endcode \code // ... blaze::DynamicMatrix<double> A, B, C; blaze::DynamicVector<double> x, y; // ... Resizing and initialization C = A * B; // Compute the left-hand side matrix-matrix multiplication first ... y = C * x; // ... before the right-hand side matrix-vector multiplication \endcode // Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation: \code blaze::DynamicVector<double> d1, d2, d3; blaze::CompressedVector<double> s1; // ... Resizing and initialization d3 = d1 + eval( s1 + d2 ); \endcode \code blaze::DynamicMatrix<double> A, B; blaze::DynamicVector<double> x, y; // ... Resizing and initialization y = eval( A * B ) * x; \endcode // \n Previous: \ref block_vectors_and_matrices &nbsp; &nbsp; Next: \ref faq \n */ //************************************************************************************************* //**FAQ******************************************************************************************** /*!\page faq Frequently Asked Questions (FAQ) // // \tableofcontents // // // <hr> // \section faq_padding A StaticVector/StaticMatrix is larger than expected. Is this a bug? // // The size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can // indeed be larger than expected: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16, 32, or even 64, but not 12 sizeof( A ); // Evaluates to 48, 96, or even 144, but not 36 \endcode // In order to achieve the maximum possible performance the \b Blaze library tries to enable // SIMD vectorization even for small vectors. 
For that reason \b Blaze by default uses padding // elements for all dense vectors and matrices to guarantee that at least a single SIMD vector // can be loaded. Depending on the used SIMD technology that can significantly increase the size // of a \c StaticVector, \c StaticMatrix, \c HybridVector or \c HybridMatrix: \code StaticVector<int,3> a; StaticMatrix<int,3,3> A; sizeof( a ); // Evaluates to 16 in case of SSE, 32 in case of AVX, and 64 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) sizeof( A ); // Evaluates to 48 in case of SSE, 96 in case of AVX, and 144 in case of AVX-512 // (under the assumption that an integer occupies 4 bytes) \endcode // The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch // that can be used to (de-)activate padding: \code #define BLAZE_USE_PADDING 1 \endcode // Alternatively it is possible to (de-)activate padding via command line or by defining this // symbol manually before including any \b Blaze header file: \code #define BLAZE_USE_PADDING 1 #include <blaze/Blaze.h> \endcode // If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if // it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce // the performance of all dense vector and matrix operations! // // // <hr> // \section faq_alignment Despite disabling padding, a StaticVector/StaticMatrix is still larger than expected. Is this a bug? 
//
// Despite disabling padding via the \c BLAZE_USE_PADDING compile time switch (see \ref faq_padding),
// the size of a \c StaticVector, \c StaticMatrix, \c HybridVector, or \c HybridMatrix can still
// be larger than expected:

   \code
   #define BLAZE_USE_PADDING 0
   #include <blaze/Blaze.h>

   StaticVector<int,3> a;
   StaticVector<int,5> b;

   sizeof( a );  // Always evaluates to 12
   sizeof( b );  // Evaluates to 32 with SSE (larger than expected) and to 20 with AVX or AVX-512 (expected)
   \endcode

// The reason for this behavior is the used SIMD technology. If SSE is used, which provides 128
// bit wide registers, a single SIMD pack can usually hold 4 integers (128 bit divided by 32 bit).
// Since the second vector contains enough elements, it is possible to benefit from vectorization.
// However, SSE requires an alignment of 16 bytes, which ultimately results in a total size of
// 32 bytes for the \c StaticVector (2 times 16 bytes due to 5 integer elements). If AVX or AVX-512
// is used, which provide 256 bit or 512 bit wide registers, a single SIMD vector can hold 8 or 16
// integers, respectively. Even the second vector does not hold enough elements to benefit from
// vectorization, which is why \b Blaze does not enforce a 32 byte (for AVX) or even 64 byte
// alignment (for AVX-512).
//
// It is possible to disable the vectorization entirely by the compile time switch in the
// <tt>./blaze/config/Vectorization.h</tt> configuration file:

   \code
   #define BLAZE_USE_VECTORIZATION 1
   \endcode

// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:

   \code
   #define BLAZE_USE_VECTORIZATION 1
   #include <blaze/Blaze.h>
   \endcode

// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics and the necessary alignment to speed up computations.
In case the switch is // set to 0, vectorization is disabled entirely and the \b Blaze library chooses default, // non-vectorized functionality for the operations. Note that deactivating the vectorization may // pose a severe performance limitation for a large number of operations! // // // <hr> // \section faq_blas To which extend does Blaze make use of BLAS functions under the hood? // // Currently the only BLAS functions that are utilized by \b Blaze are the \c gemm() functions // for the multiplication of two dense matrices (i.e. \c sgemm(), \c dgemm(), \c cgemm(), and // \c zgemm()). All other operations are always and unconditionally performed by native \b Blaze // kernels. // // The \c BLAZE_BLAS_MODE config switch (see <tt>./blaze/config/BLAS.h</tt>) determines whether // \b Blaze is allowed to use BLAS kernels. If \c BLAZE_BLAS_MODE is set to 0 then \b Blaze // does not utilize the BLAS kernels and unconditionally uses its own custom kernels. If // \c BLAZE_BLAS_MODE is set to 1 then \b Blaze is allowed to choose between using BLAS kernels // or its own custom kernels. In case of the dense matrix multiplication this decision is based // on the size of the dense matrices. For large matrices, \b Blaze uses the BLAS kernels, for // small matrices it uses its own custom kernels. The threshold for this decision can be // configured via the \c BLAZE_DMATDMATMULT_THRESHOLD, \c BLAZE_DMATTDMATMULT_THRESHOLD, // \c BLAZE_TDMATDMATMULT_THRESHOLD and \c BLAZE_TDMATTDMATMULT_THRESHOLD config switches // (see <tt>./blaze/config/Thresholds.h</tt>). // // Please note that the extend to which \b Blaze uses BLAS kernels can change in future releases // of \b Blaze! // // // <hr> // \section faq_lapack To which extend does Blaze make use of LAPACK functions under the hood? // // \b Blaze uses LAPACK functions for matrix decomposition, matrix inversion, computing the // determinants and eigenvalues, and the SVD. 
In contrast to the BLAS functionality (see
// \ref faq_blas), you cannot disable LAPACK or switch to custom kernels. In case you try to
// use any of these functionalities, but do not provide (i.e. link) a LAPACK library you will
// get link time errors.
//
// Please note that the extent to which \b Blaze uses LAPACK kernels can change in future releases
// of \b Blaze!
//
//
// <hr>
// \section faq_compile_times The compile time is too high if I include <blaze/Blaze.h>. Can I reduce it?
//
// The include file <tt><blaze/Blaze.h></tt> includes the entire functionality of the \b Blaze
// library, which by now is several hundred thousand lines of source code. That means that a lot
// of source code has to be parsed whenever <tt><blaze/Blaze.h></tt> is encountered. However, it
// is rare that everything is required within a single compilation unit. Therefore it is easily
// possible to reduce compile times by including only those \b Blaze features that are used within
// the compilation unit. For instance, instead of including <tt><blaze/Blaze.h></tt> it could be
// enough to include <tt><blaze/math/DynamicVector.h></tt>, which would reduce the compilation
// times by about 20%.
//
// Additionally we are taking care to implement new \b Blaze functionality such that compile times
// do not explode and try to reduce the compile times of existing features. Thus newer releases of
// \b Blaze can also improve compile times.
// // \n Previous: \ref intra_statement_optimization &nbsp; &nbsp; Next: \ref issue_creation_guidelines \n */ //************************************************************************************************* //**FAQ******************************************************************************************** /*!\page issue_creation_guidelines Issue Creation Guidelines // // \tableofcontents // // // One of the most important aspects of the \b Blaze project is the // <a href="https://bitbucket.org/blaze-lib/blaze/issues">issue management</a> on the official // \b Blaze Bitbucket page. We cordially invite all \b Blaze users to submit feature requests // and bug reports, as we believe that this is a significant part of making \b Blaze a better // library. However, we are asking to follow a small set of guidelines when creating an issue // to facilitate the issue management on our side and also to make issues more useful for users // of \b Blaze. // // // <hr> // \section issues_title Title // // The title is the most important detail of an issue. A well chosen title makes it easy to grasp // the idea of an issue and improves the discoverability. Therefore, please choose a title that // is ... // // - ... as descriptive as possible; // - ... as concise as possible; // - ... as unambiguous as possible. // // Also, please create a separate issue for each idea/problem/etc. A very general title or an // \"and\" in the title could be an indication that the issue is not specific enough and should // be split into several issues. 
// // \subsection issues_title_good_examples Good Examples // // - \"Provide support for AVX-512 SIMD operations\" // - \"Add support for the Boost Multiprecision Library\" // - \"Introduce reduction operations into Blaze\" // - \"Compilation error on KNL with -march=knl\" // // \subsection issues_title_bad_examples Bad Examples // // - \"Several requests\" (instead create separate issues for each single request) // - \"Improve the performance\" (instead specify which operation should perform better) // - \"Blaze library compilation error\" (instead try to be more specific) // // // <hr> // \section issues_description Description // // The description should help us to understand your idea or problem in as much detail as possible. // Also, it helps to clearly spell out your expectations (how a feature is supposed to work, how // the behavior should be, etc.). Please spend a couple of minutes to try to make the description // as comprehensive as possible. // // // <hr> // \section issues_assignee Assignee // // There is no need to assign the issue to a particular person. It is perfectly ok if you just // ignore this setting. // // // <hr> // \section issues_kind Kind of Issue // // There are four kinds of issues available in the Bitbucket issue tracker: \ref issues_kind_bug, // \ref issues_kind_enhancement, \ref issues_kind_proposal, and \ref issues_kind_task. In the // following we try to give guidelines on which kind to choose for a particular issue: // // \subsection issues_kind_bug Bug // // Please choose the category \ref issues_kind_bug if ... // // - ... you experience a compilation error despite your best efforts to get it right; // - ... you experience a crash/failure despite your best efforts to get it right; // - ... you experience problems when combining features; // - ... a feature does not work as specified/documented (i.e. can be considered broken). // // Please \b don't choose the category \ref issues_kind_bug if ... // // - ... 
you feel a feature should work differently than it currently does (instead create a // \ref issues_kind_proposal with a convincing title and description); // - ... you are not sure how to use a feature (instead create an \ref issues_kind_enhancement // issue to extend the documentation); // - ... you are missing a feature (instead create a \ref issues_kind_proposal or // \ref issues_kind_enhancement issue). // // If you select the category \ref issues_kind_bug, please also try to provide a minimum example // that fails. That helps us to minimize the time to resolve the bug. // // As we try to keep \b Blaze bug-free, we will always prioritize bug issues. However, we will // also quickly close bug issues as \"wontfix\" if the described issue is not a bug (i.e. one of // the problems mentioned above). We will \b not relabel a bug issue to \ref issues_kind_enhancement // or \ref issues_kind_proposal, even if they would be reasonable extensions to \b Blaze. // // \subsection issues_kind_enhancement Enhancement // // Please choose the category \ref issues_kind_enhancement if ... // // - ... you need an add-on to an existing feature; // - ... you need an extension of an existing feature; // - ... you need an extended documentation for an existing feature. // // \ref issues_kind_enhancement is very similar to \ref issues_kind_proposal, so we don't mind // if an \ref issues_kind_enhancement is labeled as a \ref issues_kind_proposal or vice versa. // Just make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_proposal Proposal // // Please choose the category \ref issues_kind_proposal if ... // // - ... you want to request a new feature; // - ... you want to change an existing feature. // // \ref issues_kind_proposal is very similar to \ref issues_kind_enhancement, so we don't mind if // a \ref issues_kind_proposal is labeled as an \ref issues_kind_enhancement or vice versa. 
Just // make sure you don't request an extension or new feature as a \ref issues_kind_bug. // // \subsection issues_kind_task Task // // Please choose the category \ref issues_kind_task if ... // // - ... you want us to do something not feature related; // - ... you have something else in mind which does not fall in the other three categories. // // // <hr> // \section issues_priority Priority // // Via the priority of an issue you can tell us how important the issue is to you. Therefore the // priority can have an influence on when we will deal with the issue. However, unfortunately we // don't have an infinite amount of time and we can not deal with an arbitrary amount of issues // at the same time. We will therefore take the priority into account, but mainly schedule the // issues based on impact to all \b Blaze users and the estimated time to resolve it. // // You can choose between \ref issues_priority_blocker, \ref issues_priority_critical, // \ref issues_priority_major, \ref issues_priority_minor, and \ref issues_priority_trivial. // // \subsection issues_priority_blocker Blocker // // Please choose a \ref issues_priority_blocker priority if ... // // - ... you cannot work with \b Blaze due to the described \ref issues_kind_bug; // - ... the \ref issues_kind_bug likely has an influence on \b all \b Blaze users. // // Please note that the categories \ref issues_kind_enhancement or \ref issues_kind_proposal // should never be a \ref issues_priority_blocker! // // \subsection issues_priority_critical Critical // // Please choose a \ref issues_priority_critical priority if ... // // - ... you can work around a \ref issues_kind_bug, but the workaround is (much) slower or awful; // - ... you cannot use \b Blaze without the proposed feature; // - ... you consider it to be essential for \b all \b Blaze users. // // \subsection issues_priority_major Major // // Please choose a \ref issues_priority_major priority if ... // // - ... 
a \ref issues_kind_bug or feature request is not \ref issues_priority_critical, but // still very important to you; // - ... you consider it to have a \ref issues_priority_major impact on most \b Blaze users. // // The \ref issues_priority_major category is the default setting in Bitbucket and we therefore // consider it as the default priority for issues. // // \subsection issues_priority_minor Minor // // Please choose a \ref issues_priority_minor priority if ... // // - ... a \ref issues_kind_bug does not affect many \b Blaze users; // - ... a feature request would only be useful for a small number of \b Blaze users; // - ... a feature would be nice to have, but is not particularly important. // // \subsection issues_priority_trivial Trivial // // Please choose a \ref issues_priority_trivial priority if ... // // - ... a \ref issues_kind_bug hardly affects anyone; // - ... a feature request would only be useful for very few \b Blaze users; // - ... the expected time to resolve an issue is very small. // // // <hr> // \section issues_attachment Attachments // // You can always provide us with additional information in the form of attachments. Feel free // to attach something to the issue if ... // // - ... it can help us to analyze a \ref issues_kind_bug; // - ... you have some source code that demonstrates a problem; // - ... you already have a working prototype that sketches the idea; // - ... you have additional resources that could help us. // // We appreciate anything that simplifies our work and speeds up our progress. 
// // \n Previous: \ref faq &nbsp; &nbsp; Next: \ref blaze_references \n */ //************************************************************************************************* //**Blaze References******************************************************************************* /*!\page blaze_references Blaze References // // In case you need references to the \b Blaze library (for papers or other publications), please // feel free to use one of the following references: \code @misc{blazelib, author = "Klaus {Iglberger}", title = "Blaze C++ Linear Algebra Library", howpublished = "https://bitbucket.org/blaze-lib", year = 2012 } \endcode \code @article{iglberger2012_1, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "Expression Templates Revisited: A Performance Analysis of Current Methodologies", journal = "SIAM Journal on Scientific Computing", year = 2012, volume = 34(2), pages = C42--C69 } \endcode \code @inproceedings{iglberger2012_2, author = "Klaus {Iglberger} and Georg {Hager} and Jan {Treibig} and Ulrich {R{\"u}de}", title = "High Performance Smart Expression Template Math Libraries", booktitle = "Proceedings of the 2nd International Workshop on New Algorithms and Programming Models for the Manycore Era (APMM 2012) at HPCS 2012", year = 2012 } \endcode // \n Previous: \ref issue_creation_guidelines */ //************************************************************************************************* #endif
SharedComponents.h
/***************************************************************************** * * Copyright (c) 2003-2018 by The University of Queensland * http://www.uq.edu.au * * Primary Business: Queensland, Australia * Licensed under the Apache License, version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 * * Development until 2012 by Earth Systems Science Computational Center (ESSCC) * Development 2012-2013 by School of Earth Sciences * Development from 2014 by Centre for Geoscience Computing (GeoComp) * *****************************************************************************/ /****************************************************************************/ /* Paso: shared components */ /****************************************************************************/ /* Author: Lutz Gross, l.gross@uq.edu.au */ /****************************************************************************/ #ifndef __PASO_SHAREDCOMPONENTS_H__ #define __PASO_SHAREDCOMPONENTS_H__ #include "Paso.h" namespace paso { struct SharedComponents; typedef boost::shared_ptr<SharedComponents> SharedComponents_ptr; typedef boost::shared_ptr<const SharedComponents> const_SharedComponents_ptr; PASO_DLL_API struct SharedComponents { SharedComponents(dim_t localLength, const std::vector<int>& neighbours, const index_t* sharedArray, const std::vector<index_t>& offset, index_t m = 1, index_t b = 0) : local_length(localLength*m), neighbour(neighbours), offsetInShared(offset) { if (offset.empty()) { numSharedComponents = 0; } else { numSharedComponents = offset[neighbours.size()] * m; } shared = new index_t[numSharedComponents]; if (!neighbours.empty() && !offset.empty()) { if (m != 1) { for (int i = 0; i < offsetInShared.size(); i++) { offsetInShared[i] *= m; } } #pragma omp parallel for for (dim_t i = 0; i < offset[neighbours.size()]; i++) { const index_t itmp = m * sharedArray[i] + b; for (dim_t j = 0; j < m; ++j) shared[m*i+j] = itmp+j; } } else { offsetInShared[neighbours.size()] = 0; } } ~SharedComponents() 
{ delete[] shared; } /// local array length shared dim_t local_length; /// list of the processors sharing values with this processor std::vector<int> neighbour; /// offsetInShared[i] points to the first input value in array shared /// for processor i. Has length numNeighbors+1 std::vector<index_t> offsetInShared; /// list of the (local) components which are shared with other processors. /// Has length numSharedComponents index_t* shared; /// = offsetInShared[numNeighbours] dim_t numSharedComponents; }; } // namespace paso #endif // __PASO_SHAREDCOMPONENTS_H__
common.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #pragma once #ifndef COMMON_H #define COMMON_H // Since we call cblas_dgemm in openmp for loop, // we call "extension" APIs for setting the number of threads. #ifdef USE_INTEL_MKL #include <mkl.h> #if INTEL_MKL_VERSION < 20170000 // Will throw an error at development time in non-standard settings PLEASE DONOT COMPILE SHARED LIBRARIES WITH OLDER MKL VERSIONS #endif #include <mkl_service.h> extern "C" void mkl_set_num_threads(int numThreads); #else #include <cblas.h> extern "C" void openblas_set_num_threads(int numThreads); #endif template<class FP> size_t computeNNZ(FP* arr, int limit) { size_t nnz = 0; #ifndef USE_INTEL_MKL #pragma omp parallel for reduction(+: nnz) #endif for(int i=0; i<limit; i++) nnz += (arr[i]!=0) ? 1 : 0; return nnz; } static int SYSDS_CURRENT_NUM_THREADS = -1; static void setNumThreadsForBLAS(int numThreads) { if (SYSDS_CURRENT_NUM_THREADS != numThreads) { #ifdef USE_OPEN_BLAS openblas_set_num_threads(numThreads); #else mkl_set_num_threads(numThreads); #endif SYSDS_CURRENT_NUM_THREADS = numThreads; } } #endif // COMMON_H
templatemath.h
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

/*
 * templatemath.h
 *
 * Created on: Jan 1, 2016
 * Author: agibsonccc
 */

#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_

#include <dll.h>
#include <pointercast.h>
#include <platformmath.h>

// Largest finite values of the supported floating-point element types.
// NOTE(review): bfloat16 can represent values up to ~3.39e38; the 32737.
// below looks like an int16-style limit -- confirm the intended semantics.
#define BFLOAT16_MAX_VALUE 32737.
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
// Smallest positive *normal* single-precision value.
#define FLOAT_MIN_NORMAL 1.17549435e-38

// Euler's number, for toolchains whose math headers do not define it.
#ifndef M_E
#define M_E 2.718281828459
#endif

namespace nd4j {
#ifdef __CUDACC__
#endif

    namespace math {
        // Declarations of the type-generic math primitives. Template
        // parameters follow the convention: T/X/Y = input type(s),
        // Z = output type. Definitions/specializations live further below
        // and in platformmath.h.

        // Absolute value of 'value'.
        template<typename T>
        math_def inline T nd4j_abs(T value);

        // Exchanges 'val1' and 'val2' in place.
        template<typename T>
        math_def inline void nd4j_swap(T &val1, T &val2);

        // Maximum of the two arguments.
        template<typename T>
        math_def inline T nd4j_max(T val1, T val2);

        // Minimum of the two arguments.
        template<typename T>
        math_def inline T nd4j_min(T val1, T val2);

        // Approximate equality within tolerance 'eps'.
        template <typename T>
        math_def inline bool nd4j_eq(T val1, T val2, double eps);

        // Presumably the relative error between 'val1' and 'val2' --
        // defined elsewhere; confirm against the implementation.
        template<typename T, typename Z>
        math_def inline Z nd4j_re(T val1, T val2);

        // Round to nearest integer, returned as Z.
        template<typename T, typename Z>
        math_def inline Z nd4j_rint(T val1);

        // Magnitude of 'val1' with the sign of 'val2' (cf. std::copysign).
        template<typename T, typename Z>
        math_def inline Z nd4j_copysign(T val1, T val2);

//#ifndef __CUDACC__
        // Dot product over the first 'length' elements of 'x' and 'y'.
        template<typename X, typename Y, typename Z>
        math_def inline Z nd4j_dot(X *x, Y *y, int length);
//#endif

        // Ceiling of 'val1', returned as Z.
        template<typename T, typename Z>
        math_def inline Z nd4j_ceil(T val1);
// --- Forward declarations --------------------------------------------------

template<typename T>
math_def inline bool nd4j_isnan(T val1);

template<typename T>
math_def inline bool nd4j_isinf(T val1);

template<typename T>
math_def inline bool nd4j_isfin(T val1);

template<typename T, typename Z>
math_def inline Z nd4j_cos(T val);

template<typename T, typename Z>
math_def inline Z nd4j_cosh(T val);

template<typename X, typename Z>
math_def inline Z nd4j_exp(X val);

template<typename T, typename Z>
math_def inline Z nd4j_floor(T val);

template<typename X, typename Z>
math_def inline Z nd4j_log(X val);

template<typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2);

template<typename T, typename Z>
math_def inline Z nd4j_round(T val);

template<typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X num, Y denom);

template<typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X num, Y denom);

template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);

template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);

// --- Activation functions and their derivatives ----------------------------

// Logistic sigmoid: 1 / (1 + e^-x), computed and returned in type Z.
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
    return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
}

// ELU: identity for x >= 0, e^x - 1 otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val) {
    return val >= (T) 0.f ? (Z) val : nd4j_exp<T, Z>(val) - (Z) 1.0f;
}

// Leaky ReLU: identity for x >= 0, alpha * x otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val, T alpha) {
    return val < (T) 0.0f ? (Z) (alpha * val) : (Z) val;
}

// Derivative of ELU: 1 for x >= 0, e^x otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val) {
    return val >= (T) 0.0f ? (Z) 1.0f : nd4j_exp<T, Z>(val);
}

template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);

template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);

// Softplus: log(1 + e^x).
template<typename T, typename Z>
math_def inline Z softplus(T val) {
    return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}

// Softsign: x / (1 + |x|).
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
    return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
}

template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);

template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);

// Thin wrappers delegating to the platform math primitives (p_*).
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
    return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}

template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
    return p_tan<Z>(static_cast<Z>(tval));
}

// Derivative of tanh: 1 - tanh(x)^2.
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
    Z th = nd4j_tanh<T, Z>(val);
    return (Z) 1.0f - th * th;
}

// Derivative of sigmoid: s(x) * (1 - s(x)).
// NOTE(review): declared to return T while computing in Z (as in the
// original); the implicit Z -> T conversion at the return is preserved.
template <typename T, typename Z>
math_def inline T nd4j_sigmoidderivative(T val) {
    Z s = nd4j_sigmoid<T, Z>(val);
    return s * ((Z) 1.0f - s);
}

// Derivative of softsign: 1 / (1 + |x|)^2.
template<typename T, typename Z>
math_def inline T nd4j_softsignderivative(T val) {
    T y = (T) 1.0f + nd4j_abs(val);
    return (Z) 1.0f / (y * y);
}

// Sign function: -1, 0 or +1.
template<typename T, typename Z>
math_def inline T nd4j_sgn(T val) {
    if (val < (T) 0.0f)
        return (Z) -1.0f;
    if (val > (T) 0.0f)
        return (Z) 1.0f;
    return (Z) 0.0f;
}

// Aliases for nd4j_sgn.
template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
    return nd4j_sgn<T, Z>(val);
}

template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
    return nd4j_sgn<T, Z>(val);
}

// Dot product of two length-`length` arrays, accumulated in type Z.
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
    Z acc = (Z) 0.0f;
    for (int i = 0; i < length; i++) {
        acc += static_cast<Z>(x[i]) * static_cast<Z>(y[i]);
    }
    return acc;
}

template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);

template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);

template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);

// asinh(x) = log(sqrt(x^2 + 1) + x)
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
    return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T, T, Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}

template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);

template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);

// --- nd4j_abs specializations ----------------------------------------------

template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
    if (value < (float16) 0.f)
        return float16(__hneg(value.data));
    return value;
#else
    return (float16) fabsf((float) value);
#endif
}

template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
    return (bfloat16) fabsf((float) value);
}

template<>
math_def inline float nd4j_abs<float>(float value) {
    return fabsf(value);
}

template<>
math_def inline double nd4j_abs<double>(double value) {
    return fabs(value);
}

template<>
math_def inline int nd4j_abs<int>(int value) {
    return abs(value);
}

template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
    return llabs(value);
}

// Unsigned and bool types are their own absolute value.
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
    return value;
}

template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
    return value;
}

template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
    return value;
}

template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
    return value;
}

template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
    return value;
}

template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
    return value < 0 ? -value : value;
}

template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
    return value < 0 ? -value : value;
}

// --- nd4j_isnan specializations --------------------------------------------

template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
    // Compare the raw half bits against the canonical NaN pattern.
    return *(value.data.getXP()) == 0x7fffU;
}

template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
    return value == bfloat16::nan();
}

// NaN is the only value that compares unequal to itself.
template<>
math_def inline bool nd4j_isnan<float>(float value) {
    return value != value;
}

template<>
math_def inline bool nd4j_isnan<double>(double value) {
    return value != value;
}

// Integral and bool types cannot hold NaN.
template<>
math_def inline bool nd4j_isnan<int>(int value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
    return false;
}

template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
    return false;
}

// --- nd4j_isinf specializations --------------------------------------------

// Half-precision types: treated as infinite when outside the representable
// finite range.
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
    return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
    return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}

template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
}
template<> math_def inline bool nd4j_isinf<double>(double value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<int>(int value) { return false; } template<> math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) { return false; } template<> math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) { return false; } template<> math_def inline bool nd4j_isinf<int16_t>(int16_t value) { return false; } template<> math_def inline bool nd4j_isinf<int8_t>(int8_t value) { return false; } template<> math_def inline bool nd4j_isinf<bool>(bool value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) { return false; } template<typename T> math_def inline bool nd4j_isfin(T value) { return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value); } template<> math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) { return (float16) copysignf((float) val1, (float) val2); } template<> math_def inline float nd4j_copysign<float>(float val1, float val2) { return copysignf(val1, val2); } template<> math_def inline double nd4j_copysign<double>(double val1, double val2) { return copysign(val1, val2); } template<> math_def inline int nd4j_copysign<int>(int val1, int val2) { if (val2 < 0) return -(nd4j_abs<int>(val1)); else return nd4j_abs<int>(val1); } template<> math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) { if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1)); else return nd4j_abs<Nd4jLong>(val1); } template<> math_def inline bool nd4j_max(bool val1, bool val2) { return (val1 || val2) ? true : false; } template<typename T> math_def inline T nd4j_max(T val1, T val2) { return val1 > val2 ? 
val1 : val2; } template<> math_def inline bool nd4j_min(bool val1, bool val2) { return (val1 && val2) ? true : false; } template<typename T> math_def inline T nd4j_min(T val1, T val2) { return val1 < val2 ? val1 : val2; } template <typename T> math_def inline bool nd4j_eq(T d1, T d2, double eps) { if (nd4j::math::nd4j_isinf<T>(d1) && nd4j::math::nd4j_isinf<T>(d2)) { if (d1 > 0 && d2 > 0) return true; else if (d1 < 0 && d2 < 0) return true; else return false; } auto diff = static_cast<double>(nd4j::math::nd4j_abs<T>(d1 - d2)); // works well except in the range of very large numbers if (diff <= eps) return true; // Knuth approach // works well except in the range of very small numbers if (diff <= nd4j::math::nd4j_max<double>(nd4j::math::nd4j_abs<double>(static_cast<double>(d1)), nd4j::math::nd4j_abs<double>(static_cast<double>(d2))) * eps) return true; return false; } template <typename X, typename Z> math_def inline Z nd4j_ceil(X val) { return static_cast<Z>(p_ceil<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_round(X val) { return static_cast<Z>(p_round<X>(val)); } template <typename X, typename Z> math_def inline Z nd4j_asin(X val) { return p_asin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atan(X val) { return p_atan<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_atanh(X val) { return p_atanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_cosh(X val) { return p_cosh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_rint(X val) { return p_rint<X>(val); } template <typename X, typename Z> math_def inline Z nd4j_sinh(X val) { return p_sinh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acos(X val) { return p_acos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_acosh(X val) { return p_acosh<Z>(static_cast<Z>(val)); } template 
<typename X, typename Z> math_def inline Z nd4j_cos(X val) { return p_cos<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_exp(X val) { return p_exp<X>(val); } template<typename X, typename Z> math_def inline Z nd4j_floor(X val) { return static_cast<Z>(p_floor<X>(val)); } template<typename X, typename Z> math_def inline Z nd4j_log(X val) { return static_cast<Z>(p_log<X>(val)); } /** * This func is special case - it must return floating point value, and optionally Y arg can be floating point argument * @tparam X * @tparam Y * @tparam Z * @param val * @param val2 * @return */ template <typename X, typename Y, typename Z> math_def inline Z nd4j_pow(X val, Y val2) { return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template<typename T> math_def inline T nd4j_re(T val1, T val2) { if (val1 == (T) 0.0f && val2 == (T) 0.0f) return (T) 0.0f; return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_remainder(X val, Y val2) { return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Y, typename Z> math_def inline Z nd4j_fmod(X val, Y val2) { return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2)); } template <typename X, typename Z> math_def inline Z nd4j_sin(X val) { return p_sin<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_sqrt(X val) { return p_sqrt<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_tanh(X val) { return p_tanh<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erf(X val) { return p_erf<Z>(static_cast<Z>(val)); } template <typename X, typename Z> math_def inline Z nd4j_erfc(X val) { return p_erfc<Z>(static_cast<Z>(val)); } template<typename T> math_def inline void nd4j_swap(T &val1, T &val2) { T temp = val1; val1=val2; val2=temp; }; #ifdef __CUDACC__ namespace atomics { 
template <typename T> inline __device__ T nd4j_atomicAdd(T* address, T val); template <typename T> inline __device__ T nd4j_atomicSub(T* address, T val); template <typename T> inline __device__ T nd4j_atomicMul(T* address, T val); template <typename T> inline __device__ T nd4j_atomicDiv(T* address, T val); template <> inline __device__ double nd4j_atomicAdd<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) { int* address_as_ull = (int*) address; long addr = (long) address; bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (addr - 2); PAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { float16 res = ((float16) old.B.H) + val; fresh.B.H = res.data; fresh.B.L = old.B.L; } else { float16 res = ((float16) old.B.L) + val; fresh.B.L = res.data; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return old.B.L; } template <> inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) { int* address_as_ull = (int*) address; long addr = (long)(address); bool misaligned = addr & 0x3; if (misaligned) address_as_ull = (int *) (addr - 2); BPAIR old, assumed, fresh; old.W = *address_as_ull; do { if (!misaligned) { bfloat16 res = old.B.H + val; fresh.B.H = res; fresh.B.L = old.B.L; } else { bfloat16 res = old.B.L + val; fresh.B.L = res; fresh.B.H = old.B.H; } assumed.W = old.W; old.W = atomicCAS(address_as_ull, assumed.W, fresh.W); } while (assumed.W != old.W); if (!misaligned) return old.B.H; else return 
old.B.L; } template <> inline __device__ double nd4j_atomicSub<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val - __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicMul<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicDiv<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val / __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float nd4j_atomicAdd<float>(float* address, float val) { return atomicAdd(address,val); } template <> inline __device__ float nd4j_atomicSub<float>(float* address, float val) { int* address_as_ull = (int*) address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val - __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ float nd4j_atomicMul<float>(float* address, float val) { int* address_as_ull = ( int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } 
template <> inline __device__ float nd4j_atomicDiv<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } } #endif } } #endif /* TEMPLATEMATH_H_ */
// IcgNabla.c
// Copyright (C) 2016 Gernot Riegler // Institute for Computer Graphics and Vision (ICG) // Graz University of Technology (TU GRAZ) // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // 3. All advertising materials mentioning features or use of this software // must display the following acknowledgement: // This product includes software developed by the ICG, TU GRAZ. // 4. Neither the name of the ICG, TU GRAZ nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/IcgNabla.c"
#else

/* Forward pass: computes the forward-difference gradient (nabla) of the
 * input.  For each input channel c, output channel 2c holds the x
 * difference (next column minus current, 0 in the last column) and output
 * channel 2c+1 the y difference (next row minus current, 0 in the last
 * row).  Accepts a 3D (C,H,W) or 4D (N,C,H,W) tensor; output is resized
 * to (2C,H,W) resp. (N,2C,H,W).  Returns 1 (the output on the Lua stack). */
static int icgnn_(IcgNabla_updateOutput)(lua_State *L) {
  THTensor* in = luaT_checkudata(L, 2, torch_Tensor);
  THTensor* out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  long n_dim = in->nDimension;
  luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");

  /* Work on a contiguous copy so linear indexing below is valid. */
  in = THTensor_(newContiguous)(in);
  real* in_data = THTensor_(data)(in);

  long num, channels, height, width;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
    THTensor_(resize3d)(out, 2 * channels, height, width);
  }
  else {
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
    THTensor_(resize4d)(out, num, 2 * channels, height, width);
  }
  real* out_data = THTensor_(data)(out);

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num * channels; ++n) {
    long h;
    for(h = 0; h < height; ++h) {
      long w;
      for(w = 0; w < width; ++w) {
        long in_idx = (n * height + h) * width + w;
        /* input plane n maps to output planes 2n (x-diff) and 2n+1 (y-diff) */
        long out_x_idx = ((n * 2 + 0) * height + h) * width + w;
        long out_y_idx = ((n * 2 + 1) * height + h) * width + w;

        if(w < width - 1) {
          out_data[out_x_idx] = in_data[in_idx + 1] - in_data[in_idx];
        }
        else {
          out_data[out_x_idx] = 0;
        }

        if(h < height - 1) {
          out_data[out_y_idx] = in_data[in_idx + width] - in_data[in_idx];
        }
        else {
          out_data[out_y_idx] = 0;
        }
      }
    }
  }

  THTensor_(free)(in);
  return 1;
}

/* Backward pass: accumulates into gradInput the transpose of the forward
 * difference operator -- each pixel receives +1 times the difference that
 * ends at it and -1 times the difference that starts at it, for x and y.
 * BUG FIX: the original performed no rank check here, so for tensors that
 * are neither 3D nor 4D the dimensions were read uninitialized (UB).
 * NOTE(review): grad_out is indexed linearly and is assumed contiguous --
 * confirm the Lua wrapper guarantees this. */
static int icgnn_(IcgNabla_updateGradInput)(lua_State *L) {
  THTensor *in = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *grad_out = luaT_checkudata(L, 3, torch_Tensor);
  /* fetched only to validate the module has an output field */
  THTensor *out = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  THTensor *grad_in = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  long n_dim = in->nDimension;
  luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");

  THTensor_(resizeAs)(grad_in, in);
  real* grad_in_data = THTensor_(data)(grad_in);
  real* grad_out_data = THTensor_(data)(grad_out);

  long num, channels, height, width;
  if(n_dim == 3) {
    num = 1;
    channels = in->size[0];
    height = in->size[1];
    width = in->size[2];
  }
  else {
    num = in->size[0];
    channels = in->size[1];
    height = in->size[2];
    width = in->size[3];
  }

  long n;
#pragma omp parallel for private(n)
  for(n = 0; n < num * channels; ++n) {
    long h;
    for(h = 0; h < height; ++h) {
      long w;
      for(w = 0; w < width; ++w) {
        long in_idx = (n * height + h) * width + w;
        long out_x_idx = ((n * 2 + 0) * height + h) * width + w;
        long out_y_idx = ((n * 2 + 1) * height + h) * width + w;

        real grad = 0;
        /* x direction */
        if(w > 0) {
          grad = grad + (+1) * grad_out_data[out_x_idx - 1];
        }
        if(w < width - 1) {
          grad = grad + (-1) * grad_out_data[out_x_idx];
        }
        /* y direction */
        if(h > 0) {
          grad = grad + (+1) * grad_out_data[out_y_idx - width];
        }
        if(h < height - 1) {
          grad = grad + (-1) * grad_out_data[out_y_idx];
        }

        grad_in_data[in_idx] = grad;
      }
    }
  }

  return 1;
}

static const struct luaL_Reg icgnn_(IcgNabla__) [] = {
  {"IcgNabla_updateOutput", icgnn_(IcgNabla_updateOutput)},
  {"IcgNabla_updateGradInput", icgnn_(IcgNabla_updateGradInput)},
  {NULL, NULL}
};

/* Registers the two methods on the torch Tensor metatable under "icgnn". */
static void icgnn_(IcgNabla_init)(lua_State *L) {
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, icgnn_(IcgNabla__), "icgnn");
  lua_pop(L,1);
}

#endif
/* krige.c */
/* * krige.c * * David Garen 8/91, 3/94 * * Calculate kriging weights * * 26 May 2000: * Change solution method to LU decomposition * * Feb 2016: * - Create new function krige_grid to calculate the kriging weights * on a grid. This is meant to be called from a Python function * - Everything else is the same except the 2D arrays must now be indexed * with linear indexing * */ #include <stdio.h> #include <stdlib.h> //#include <malloc/malloc.h> #include <omp.h> #include "dk_header.h" void krige_grid(nsta, ngrid, ad, dgrid, elevations, nthreads, weights) int nsta; /* number of stations used */ int ngrid; /* number of grid cells*/ double *ad; /* matrix of distances between prec/temp stations for computing kriging weights */ double *dgrid; /* array of distances between grid cells and prec/temp stations */ double *elevations; /* vector of station elevations */ int nthreads; /* number of threads for parrallel processing */ double *weights; /* output weights */ //int nthreads; /* number of threads to use */ { double w[nsta+1]; /* kriging weights */ int i, j; /* Calculate kriging weights using all stations */ omp_set_dynamic(0); // Explicitly disable dynamic teams omp_set_num_threads(nthreads); // Use N threads for all consecutive parallel regions #pragma omp parallel shared(nsta, ad, dgrid, elevations, weights) private(i, j, w) { #pragma omp for for (i = 0; i < ngrid; i++) { krige(nsta, ad, &dgrid[i*nsta], elevations, w); for (j = 0; j < nsta; j++){ weights[i*nsta + j] = w[j]; } } } // return wall; } void krige(nsta, ad, dgrid, elevations, w) int nsta; /* number of stations used */ double *ad; /* matrix of distances between prec/temp stations for computing kriging weights */ double *dgrid; /* array of distances between grid cells and prec/temp stations */ double *elevations; /* vector of station elevations */ double *w; /* kriging weights */ { double elevsave; /* stored value of station elevation */ int m, mm, n, nn, i, j; /* loop indexes */ int msave; /* stored value of m 
index */ int nsp1; /* ns plus 1 */ double *wcalc; /* calculation vector for weights */ int ns; /* number of stations */ int luret; /* return value from lusolv() */ int *staflg; /* station use flags*/ double temp; /* temporary variable */ int itemp; /* temporary variable */ double *dist; /* sorted distance */ int *idx; /* index sorted distance */ double **a; /* data matrix for solving for kriging weights (input to m_inv()) */ // double *w; /* kriging weights */ // double w[nsta+1]; // nsta = ns; // find the N closest stations dist = vector(nsta); idx = ivector(nsta); for (i = 0; i < nsta; ++i){ dist[i] = dgrid[i]; idx[i] = i; } for (i = 0; i < nsta; ++i){ for (j = i + 1; j < nsta; ++j){ if (dist[i] > dist[j]) { // sort the distance temp = dist[i]; dist[i] = dist[j]; dist[j] = temp; // re-sort the index itemp = idx[i]; idx[i] = idx[j]; idx[j] = itemp; } } } // set station use flags ns = 0; staflg = ivector(nsta); for (m = 0; m < nsta; m++) { staflg[idx[m]] = 1; ns++; } // for (i = 0; i < nsta; ++i){ // printf("%f - %i - %i\n",dist[i],idx[i],staflg[i]); // } // exit(0); a = dmatrix(nsta+1, nsta+2); // w = dvector(nsta+1); wcalc = dvector(nsta+1); while (1) { nsp1 = ns + 1; /* Load matrix for calculating kriging weights using only the desired stations (staflg = 1) */ mm = -1; for (m = 0; m < nsta; m++) { if (staflg[m] == 1) { mm++; nn = -1; for (n = 0; n < nsta; n++) { if (staflg[n] == 1) { nn++; a[mm][nn] = ad[m*nsta + n]; } } a[mm][ns] = a[ns][mm] = 1; a[mm][nsp1] = dgrid[m]; } } a[ns][ns] = 0; a[ns][nsp1] = 1; n = nsp1; /* Solve linear system for kriging weights */ if ((luret = lusolv(n, a, wcalc)) != 0) { printf("Error in lusolv()\n"); exit(0); } /* Check for negative weights, throw out the most distant station by elevation with a negative weight, and recalculate weights until all are positive */ elevsave = 0.0; mm = msave = -1; for (m = 0; m < nsta; m++) { if (staflg[m] == 1) { mm++; if (wcalc[mm] < 0.0) { if (elevations[m] > elevsave) { msave = m; elevsave = 
elevations[m]; } } } } if (msave >= 0) { staflg[msave] = 0; // set station use flag to zero for furthest station ns--; } else { mm = -1; for (m = 0; m < nsta; m++) { if (staflg[m] == 1) { mm++; w[m] = wcalc[mm]; } else w[m] = 0.0; } break; } } // free(wcalc); /* clean up 1D arrays*/ free(dist); free(idx); free(staflg); free(wcalc); /* clean up 2D arrays*/ for (m = 0; m < nsta+1; m++) { free(a[m]); } free(a); // return w; }
// ligra_preprocessor.h
// This code is part of the project "Ligra: A Lightweight Graph Processing // Framework for Shared Memory", presented at Principles and Practice of // Parallel Programming, 2013. // Copyright (c) 2013 Julian Shun and Guy Blelloch // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights (to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#ifndef LIGRA_H
#define LIGRA_H
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cstring>
#include <string>
#include <algorithm>
#include <cassert>
#include "parallel.h"
#include "gettime.h"
#include "timer.h" // timer from GAP
#include "utils.h"
#include "vertex.h"
#include "compressedVertex.h"
#include "vertexSubset.h"
#include "graph.h"
#include "IO.h"
#include "parseCommandLine.h"
#include "gettime.h"
#include "index_map.h"
#include "edgeMap_utils.h"
using namespace std;

//*****START FRAMEWORK*****

// Option bits passed to edgeMap/vertexMap-style primitives.
typedef uint32_t flags;
const flags no_output = 1;          // do not materialize the output frontier
const flags pack_edges = 2;         // edgeMapFilter: physically pack adjacency lists
const flags sparse_no_filter = 4;   // use the no-filter sparse traversal
const flags dense_forward = 8;      // dense traversal over out-edges instead of in-edges
const flags dense_parallel = 16;    // parallelize within a vertex's neighbor list (dense)
const flags remove_duplicates = 32; // deduplicate target vertices in sparse output
// True unless the caller asked to suppress the output frontier.
inline bool should_output(const flags& fl) { return !(fl & no_output); }

const int dynChunkSz = 64; // chunk size for openmp's dynamic scheduling

// Dense (pull-style) edge map: for every vertex v with f.cond(v) true,
// scan v's in-neighbors and apply f (decodeInNghBreakEarly may stop early
// once cond fails). Returns the next frontier as a dense bool/data array
// when output is requested.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDense(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
  using D = tuple<bool, data>;
  long n = GA.n;
  vertex *G = GA.V;
  if (should_output(fl)) {
    D* next = newA(D, n);
    auto g = get_emdense_gen<data>(next);
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long v=0; v<n; v++) {
      // Clear the "in next frontier" bit before decoding.
      std::get<0>(next[v]) = 0;
      if (f.cond(v)) {
        G[v].decodeInNghBreakEarly(v, vertexSubset, f, g, fl & dense_parallel);
      }
    }
    return vertexSubsetData<data>(n, next);
  } else {
    auto g = get_emdense_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long v=0; v<n; v++) {
      if (f.cond(v)) {
        G[v].decodeInNghBreakEarly(v, vertexSubset, f, g, fl & dense_parallel);
      }
    }
    return vertexSubsetData<data>(n);
  }
}

// Dense forward (push-style) edge map: for every vertex i currently in the
// frontier, scan i's out-neighbors and apply f. Unlike edgeMapDense, writes
// go toward the targets, so the output array is zeroed up front.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDenseForward(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
  using D = tuple<bool, data>;
  long n = GA.n;
  vertex *G = GA.V;
  if (should_output(fl)) {
    D* next = newA(D, n);
    auto g = get_emdense_forward_gen<data>(next);
    parallel_for(long i=0;i<n;i++) { std::get<0>(next[i]) = 0; }
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long i=0; i<n; i++) {
      if (vertexSubset.isIn(i)) {
        G[i].decodeOutNgh(i, f, g);
      }
    }
    return vertexSubsetData<data>(n, next);
  } else {
    auto g = get_emdense_forward_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (long i=0; i<n; i++) {
      if (vertexSubset.isIn(i)) {
        G[i].decodeOutNgh(i, f, g);
      }
    }
    return vertexSubsetData<data>(n);
  }
}

// Sparse (push-style) edge map over only the m frontier vertices.
// `degrees` holds each frontier vertex's out-degree; a plus-scan turns it
// into per-vertex output offsets. Successful edge applications are written
// into outEdges; failed slots are marked UINT_E_MAX and filtered out.
// NOTE(review): `degrees` is overwritten in place by the scan.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse(graph<vertex>& GA, vertex* frontierVertices, VS& indices,
                                     uintT* degrees, uintT m, F &f, const flags fl) {
  using S = tuple<uintE, data>;
  long n = indices.n;
  S* outEdges;
  long outEdgeCount = 0;
  if (should_output(fl)) {
    uintT* offsets = degrees;
    outEdgeCount = sequence::plusScan(offsets, offsets, m);
    outEdges = newA(S, outEdgeCount);
    auto g = get_emsparse_gen<data>(outEdges);
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (size_t i = 0; i < m; i++) {
      uintT v = indices.vtx(i), o = offsets[i];
      vertex vert = frontierVertices[i];
      vert.decodeOutNghSparse(v, o, f, g);
    }
  } else {
    auto g = get_emsparse_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
    for (size_t i = 0; i < m; i++) {
      uintT v = indices.vtx(i);
      vertex vert = frontierVertices[i];
      vert.decodeOutNghSparse(v, 0, f, g);
    }
  }
  if (should_output(fl)) {
    S* nextIndices = newA(S, outEdgeCount);
    if (fl & remove_duplicates) {
      // Lazily allocate the per-graph dedup scratch array.
      if (GA.flags == NULL) {
        GA.flags = newA(uintE, n);
        parallel_for(long i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
      }
      auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(outEdges[i]); };
      remDuplicates(get_key, GA.flags, outEdgeCount, n);
    }
    // Keep only slots whose target id was actually written.
    auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
    size_t nextM = pbbs::filterf(outEdges, nextIndices, outEdgeCount, p);
    free(outEdges);
    return vertexSubsetData<data>(n, nextM, nextIndices);
  }
  else {
    return vertexSubsetData<data>(n);
  }
}

// Sparse edge map without the filtering pass: edges are decoded sequentially
// per frontier vertex and the (possibly shorter) per-block outputs are
// compacted with a second scan over fixed-size work blocks.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse_no_filter(graph<vertex>& GA, vertex* frontierVertices, VS& indices,
                                               uintT* offsets, uintT m, F& f, const flags fl) {
  using S = tuple<uintE, data>;
  long n = indices.n;
  long outEdgeCount = sequence::plusScan(offsets, offsets, m);
  S* outEdges = newA(S, outEdgeCount);
  auto g = get_emsparse_no_filter_gen<data>(outEdges);
  // binary-search into scan to map workers->chunks
  size_t b_size = 10000;
  size_t n_blocks = nblocks(outEdgeCount, b_size);
  uintE* cts = newA(uintE, n_blocks+1);
  size_t* block_offs = newA(size_t, n_blocks+1);
  auto offsets_m = make_in_imap<uintT>(m, [&] (size_t i) { return offsets[i]; });
  auto lt = [] (const uintT& l, const uintT& r) { return l < r; };
  parallel_for(size_t i=0; i<n_blocks; i++) {
    size_t s_val = i*b_size;
    block_offs[i] = pbbs::binary_search(offsets_m, s_val, lt);
  }
  block_offs[n_blocks] = m;
#pragma omp parallel for schedule (dynamic, dynChunkSz / 8)
  for (size_t i=0; i<n_blocks; i++) {
    if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
      // start and end are offsets in [m]
      size_t start = block_offs[i];
      size_t end = block_offs[i+1];
      uintT start_o = offsets[start];
      uintT k = start_o;
      for (size_t j=start; j<end; j++) {
        uintE v = indices.vtx(j);
        size_t num_in = frontierVertices[j].decodeOutNghSparseSeq(v, k, f, g);
        k += num_in;
      }
      // Number of edges this block actually emitted.
      cts[i] = (k - start_o);
    } else {
      cts[i] = 0;
    }
  }
  long outSize = sequence::plusScan(cts, cts, n_blocks);
  cts[n_blocks] = outSize;
  S* out = newA(S, outSize);
  // Compact each block's emitted edges into the final contiguous array.
  parallel_for (size_t i=0; i<n_blocks; i++) {
    if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
      size_t start = block_offs[i];
      size_t start_o = offsets[start];
      size_t out_off = cts[i];
      size_t block_size = cts[i+1] - out_off;
      for (size_t j=0; j<block_size; j++) {
        out[out_off + j] = outEdges[start_o + j];
      }
    }
  }
  free(outEdges); free(cts); free(block_offs);
  if (fl & remove_duplicates) {
    if (GA.flags == NULL) {
      GA.flags = newA(uintE, n);
      parallel_for(size_t i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
    }
    auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(out[i]); };
    remDuplicates(get_key, GA.flags, outSize, n);
    S* nextIndices = newA(S, outSize);
    auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
    size_t nextM = pbbs::filterf(out, nextIndices, outSize, p);
    free(out);
    return vertexSubsetData<data>(n, nextM, nextIndices);
  }
  return vertexSubsetData<data>(n, outSize, out);
}

// Decides on sparse or dense base on number of nonzeros in the active vertices.
// Frontier size + its total out-degree above `threshold` (default |E|/20)
// triggers a dense (pull or forward) traversal; otherwise sparse.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapData(graph<vertex>& GA, VS &vs, F f,
                                   intT threshold = -1, const flags& fl=0) {
  long numVertices = GA.n, numEdges = GA.m, m = vs.numNonzeros();
  if(threshold == -1) threshold = numEdges/20; //default threshold
  vertex *G = GA.V;
  if (numVertices != vs.numRows()) {
    cout << "edgeMap: Sizes Don't match" << endl;
    abort();
  }
  if (vs.size() == 0) return vertexSubsetData<data>(numVertices);
  vs.toSparse();
  // Gather each frontier vertex and its out-degree for the density test
  // and for the sparse traversal's offset scan.
  uintT* degrees = newA(uintT, m);
  vertex* frontierVertices = newA(vertex,m);
  {parallel_for (size_t i=0; i < m; i++) {
    uintE v_id = vs.vtx(i);
    vertex v = G[v_id];
    degrees[i] = v.getOutDegree();
    frontierVertices[i] = v;
  }}
  uintT outDegrees = sequence::plusReduce(degrees, m);
  if (outDegrees == 0) return vertexSubsetData<data>(numVertices);
  if (m + outDegrees > threshold) {
    vs.toDense();
    free(degrees); free(frontierVertices);
    return (fl & dense_forward) ?
      edgeMapDenseForward<data, vertex, VS, F>(GA, vs, f, fl) :
      edgeMapDense<data, vertex, VS, F>(GA, vs, f, fl);
  } else {
    auto vs_out = (should_output(fl) && fl & sparse_no_filter) ? // only call snof when we output
      edgeMapSparse_no_filter<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl) :
      edgeMapSparse<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl);
    free(degrees); free(frontierVertices);
    return vs_out;
  }
}

// Regular edgeMap, where no extra data is stored per vertex.
template <class vertex, class VS, class F>
vertexSubset edgeMap(graph<vertex> GA, VS& vs, F f,
                     intT threshold = -1, const flags& fl=0) {
  return edgeMapData<pbbs::empty>(GA, vs, f, threshold, fl);
}

/* General function to print stats about frontier size
   (prints the frontier as a percentage of |V| and whether the
   iteration would run in PULL (dense) or PUSH (sparse) mode). */
template <class VS>
void frontierStats(VS& vs, long numVertices) {
  double percent = (static_cast<double>(vs.size()) / static_cast<double>(numVertices)) * 100;
  if (vs.dense()) {
    std::cout << "PULL iteration. Frontier size = " << percent << std::endl;
  } else {
    std::cout << "PUSH iteration. Frontier size = " << percent << std::endl;
  }
  return;
}

// Packs out the adjacency lists of all vertex in vs. A neighbor, ngh, is kept
// in the new adjacency list if p(ngh) is true.
// Weighted graphs are not yet supported, but this should be easy to do.
// Physically packs each frontier vertex's out-adjacency list, keeping a
// neighbor iff p(neighbor) is true. When output is requested, returns
// (vertex, new_degree) pairs for the packed vertices.
template <class vertex, class P>
vertexSubsetData<uintE> packEdges(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
  using S = tuple<uintE, uintE>;
  vs.toSparse();
  vertex* G = GA.V;
  long m = vs.numNonzeros();
  long n = vs.numRows();
  if (vs.size() == 0) {
    return vertexSubsetData<uintE>(n);
  }
  // Per-vertex out-degrees, scanned into offsets for the shared scratch
  // buffers below (one slot per potential out-edge).
  auto degrees = array_imap<uintT>(m);
  granular_for(i, 0, m, (m > 2000), {
    uintE v = vs.vtx(i);
    degrees[i] = G[v].getOutDegree();
  });
  long outEdgeCount = pbbs::scan_add(degrees, degrees);
  S* outV;
  if (should_output(fl)) {
    outV = newA(S, vs.size());
  }
  bool* bits = newA(bool, outEdgeCount);
  uintE* tmp1 = newA(uintE, outEdgeCount);
  uintE* tmp2 = newA(uintE, outEdgeCount);
  if (should_output(fl)) {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t offset = degrees[i];
      auto bitsOff = &(bits[offset]); auto tmp1Off = &(tmp1[offset]);
      auto tmp2Off = &(tmp2[offset]);
      size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
      outV[i] = make_tuple(v, ct);
    }
  } else {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t offset = degrees[i];
      auto bitsOff = &(bits[offset]); auto tmp1Off = &(tmp1[offset]);
      auto tmp2Off = &(tmp2[offset]);
      // ct intentionally unused here: packOutNgh still mutates the list.
      size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
    }
  }
  free(bits); free(tmp1); free(tmp2);
  if (should_output(fl)) {
    return vertexSubsetData<uintE>(n, m, outV);
  } else {
    return vertexSubsetData<uintE>(n);
  }
}

// Counts, for each frontier vertex, how many of its out-neighbors satisfy p
// (without modifying the graph) — unless pack_edges is set, in which case it
// delegates to packEdges and the lists ARE modified.
template <class vertex, class P>
vertexSubsetData<uintE> edgeMapFilter(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
  vs.toSparse();
  if (fl & pack_edges) {
    return packEdges<vertex, P>(GA, vs, p, fl);
  }
  vertex* G = GA.V;
  long m = vs.numNonzeros(); long n = vs.numRows();
  using S = tuple<uintE, uintE>;
  if (vs.size() == 0) {
    return vertexSubsetData<uintE>(n);
  }
  S* outV;
  if (should_output(fl)) {
    outV = newA(S, vs.size());
  }
  if (should_output(fl)) {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t ct = G[v].countOutNgh(v, p);
      outV[i] = make_tuple(v, ct);
    }
  } else {
    parallel_for (size_t i=0; i<m; i++) {
      uintE v = vs.vtx(i);
      size_t ct = G[v].countOutNgh(v, p);
    }
  }
  if (should_output(fl)) {
    return vertexSubsetData<uintE>(n, m, outV);
  } else {
    return vertexSubsetData<uintE>(n);
  }
}

//*****VERTEX FUNCTIONS*****

// vertexMap over a vertexSubsetData: f receives (vertex, data).
// Enabled only when VS is NOT the plain vertexSubset.
template <class F, class VS, typename std::enable_if<
  !std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
  size_t n = V.numRows(), m = V.numNonzeros();
  if(V.dense()) {
    parallel_for(long i=0;i<n;i++) {
      if(V.isIn(i)) {
        f(i, V.ithData(i));
      }
    }
  } else {
    parallel_for(long i=0;i<m;i++) {
      f(V.vtx(i), V.vtxData(i));
    }
  }
}

// vertexMap over a plain vertexSubset: f receives only the vertex id.
template <class VS, class F, typename std::enable_if<
  std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
  size_t n = V.numRows(), m = V.numNonzeros();
  if(V.dense()) {
    parallel_for(long i=0;i<n;i++) {
      if(V.isIn(i)) {
        f(i);
      }
    }
  } else {
    parallel_for(long i=0;i<m;i++) {
      f(V.vtx(i));
    }
  }
}

//Note: this is the version of vertexMap in which only a subset of the
//input vertexSubset is returned. Always converts to and returns a dense
//representation.
template <class F>
vertexSubset vertexFilter(vertexSubset V, F filter) {
  long n = V.numRows(), m = V.numNonzeros();
  V.toDense();
  bool* d_out = newA(bool,n);
  {parallel_for(long i=0;i<n;i++) d_out[i] = 0;}
  {parallel_for(long i=0;i<n;i++)
      if(V.d[i]) d_out[i] = filter(i);}
  return vertexSubset(n,d_out);
}

// Sparse vertex filter: evaluates filter on each frontier vertex and packs
// the survivors into a sparse vertexSubset.
template <class F>
vertexSubset vertexFilter2(vertexSubset V, F filter) {
  long n = V.numRows(), m = V.numNonzeros();
  if (m == 0) {
    return vertexSubset(n);
  }
  bool* bits = newA(bool, m);
  V.toSparse();
  {parallel_for(size_t i=0; i<m; i++) {
    uintE v = V.vtx(i);
    bits[i] = filter(v);
  }}
  auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
  auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
  auto out = pbbs::pack(v_imap, bits_m);
  out.alloc = false; // ownership of out.s transfers to the returned subset
  free(bits);
  return vertexSubset(n, out.size(), out.s);
}

// As above, but the filter also sees each vertex's associated data.
template <class data, class F>
vertexSubset vertexFilter2(vertexSubsetData<data> V, F filter) {
  long n = V.numRows(), m = V.numNonzeros();
  if (m == 0) {
    return vertexSubset(n);
  }
  bool* bits = newA(bool, m);
  V.toSparse();
  parallel_for(size_t i=0; i<m; i++) {
    auto t = V.vtxAndData(i);
    bits[i] = filter(std::get<0>(t), std::get<1>(t));
  }
  auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
  auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
  auto out = pbbs::pack(v_imap, bits_m);
  out.alloc = false;
  free(bits);
  return vertexSubset(n, out.size(), out.s);
}

//cond function that always returns true
inline bool cond_true (intT d) { return 1; }

// Implemented by each application linked against this framework.
template<class vertex>
void Compute(graph<vertex>&, commandLine, pvector<uintE> &new_ids);

// Driver: parses command-line options, loads the graph, and runs the
// preprocessing (vertex-reordering) rounds. The compressed-graph path is
// currently disabled (assert(false) + #if 0).
int parallel_main(int argc, char* argv[]) {
  commandLine P(argc,argv," [-s] <inFile>");
  char* iFile = P.getArgument(0);
  bool symmetric = P.getOptionValue("-s");
  bool compressed = P.getOptionValue("-c");
  bool binary = P.getOptionValue("-b");
  bool mmap = P.getOptionValue("-m");
  bool isPageRank = (P.getOptionIntValue("-pagerank", -1) == 1);
  bool isDenseWrite = (P.getOptionIntValue("-densewrite", -1) == 1);
  /* preprocessing options : 0 - outdegsort, 1 - indegsort, else - no-preprocessing */
  int preprocess = P.getOptionIntValue("-preprocess", -1);
  //cout << "mmap = " << mmap << endl;
  long rounds = P.getOptionLongValue("-rounds",3);
  if (compressed) {
    // Compressed graphs unsupported in this preprocessor build.
    assert(false);
#if 0
    if (symmetric) {
      graph<compressedSymmetricVertex> G =
        readCompressedGraph<compressedSymmetricVertex>(iFile,symmetric,mmap); //symmetric graph
      Compute(G,P);
      for(int r=0;r<rounds;r++) {
        startTime();
        Compute(G,P);
        nextTime("Running time");
      }
      G.del();
    } else {
      graph<compressedAsymmetricVertex> G =
        readCompressedGraph<compressedAsymmetricVertex>(iFile,symmetric,mmap); //asymmetric graph
      Compute(G,P);
      if(G.transposed) G.transpose();
      for(int r=0;r<rounds;r++) {
        startTime();
        Compute(G,P);
        nextTime("Running time");
        if(G.transposed) G.transpose();
      }
      G.del();
    }
#endif
  } else {
    if (symmetric) {
      graph<symmetricVertex> G =
        readGraph<symmetricVertex>(iFile,compressed,symmetric,binary,mmap); //symmetric graph
      if (preprocess == 0 || preprocess == 1) {
        for (int r = 0; r < rounds; ++r) {
          pvector<uintE> new_ids(G.n, UINT_E_MAX);
          graph<symmetricVertex> newG =
            preprocessGraph<symmetricVertex>(G, symmetric, (preprocess == 0), new_ids);
          newG.del();
        }
        // NOTE(review): G is only released when a preprocessing mode was
        // selected; other values of -preprocess leave G alive — confirm intent.
        G.del();
      }
    } else {
      graph<asymmetricVertex> G =
        readGraph<asymmetricVertex>(iFile,compressed,symmetric,binary,mmap); //asymmetric graph
      if (preprocess == 0 || preprocess == 1) {
        for (int r = 0; r < rounds; ++r) {
          pvector<uintE> new_ids(G.n, UINT_E_MAX);
          graph<asymmetricVertex> newG =
            preprocessGraph<asymmetricVertex>(G, symmetric, (preprocess == 0), new_ids, isPageRank, isDenseWrite);
          newG.del();
        }
        G.del();
      }
    }
  }
}
#endif
axcrypt_fmt_plug.c
/* AxCrypt 1.x encrypted files cracker patch for JtR
 * 2016 by Fist0urs <eddy.maaalou at gmail.com>.
 *
 * This software is Copyright (c) 2016, Fist0urs <eddy.maaalou at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Scheme: KEK = SHA1(password [+ key-file bytes]) XOR salt, then an
 * RFC 3394-style AES key-unwrap is iterated; a candidate is correct when
 * the recovered MSB equals the fixed AES wrapping IV (A6..A6).
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_axcrypt;
#elif FMT_REGISTERS_H
john_register_one(&fmt_axcrypt);
#else

#include <string.h>
#include "stdint.h"
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "dyna_salt.h"
#include "sha.h"
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL "axcrypt"
#define FORMAT_NAME "AxCrypt"
#define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125 /* actual max is 250 */
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt *)
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_ALIGN sizeof(struct custom_salt *)
/* constant value recommended by FIPS */
#define AES_WRAPPING_IV "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6"
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* XOR the unwrap counter into the first 4 bytes of the MSB half,
   little-endian byte order. */
#define PUT_64BITS_XOR_MSB(cp, value) ( \
	(cp)[0] ^= (unsigned char)((value)), \
	(cp)[1] ^= (unsigned char)((value) >> 8), \
	(cp)[2] ^= (unsigned char)((value) >> 16), \
	(cp)[3] ^= (unsigned char)((value) >> 24 ) )

static struct fmt_tests axcrypt_tests[] = {
	/* formats can be:
	   $axcrypt$*version*iterations*salt*wrappedkey
	   $axcrypt$*version*iterations*salt*wrappedkey*key-file */
	{"$axcrypt$*1*1337*0fd9e7e2f907f480f8af162564f8f94b*af10c88878ba4e2c89b12586f93b7802453121ee702bc362", "Bab00nmoNCo|\\|2$inge"},
	{"$axcrypt$*1*60000*7522aa07694d441e47f8faad8a8cb984*95e02b7ccbdc27c227a80d1307505d8b769e87b32f312aa1", "nuNuche<3rewshauv"},
	{"$axcrypt$*1*31014*3408ae91dddc0b1750ed4223fd843364*1cc0f8fa8d89f44d284d0562ac7e93848c86ce9605907129", "tr0pO$phere5apointzero"},
	/* axcrypt created key-file */
	{"$axcrypt$*1*38574*ce4f58c1e85df1ea921df6d6c05439b4*3278c3c730f7887b1008e852e59997e2196710a5c6bc1813*66664a6b2074434a4520374d73592055626979204a6b755520736d6b4b20394e694a205548444320524578562065674b33202f42593d", "0v3rgo2|<fc!"},
	/* custom key-file */
	{"$axcrypt$*1*130885*8eb4d745f7ac3f7505bcf14e8ce7e3b4*5221a6e8277e90b0b4f16f7871fca02986fca55c0dec5e59*22486520646f65736e2774206c696b652047656f726765204d69636861656c3a20426f6f6f6f6f6f220d0a0d0a49206665656c20736f20756e737572650d0a417320492074616b6520796f75722068616e6420616e64206c65616420796f7520746f207468652062616e6365666c6f6f720d0a417320746865206d75736963207374617274732c20736f6d657468696e6720696e20796f757220657965730d0a43616c6c7320746f206d696e642074686520676f6c64656e2073637265656e0d0a416e6420616c6c206974277320736169642069732068690d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a54696d652063616e206e65766572206d656e640d0a54686520636172656c657373207768697370657273206f66206120676f6f6420667269656e640d0a546f2074686520686561727420616e64206d696e640d0a49676e6f72616e6365206973206b696e640d0a54686572652773206e6f20636f6d666f727420696e207468652074727574680d0a5061696e20697320616c6c20796f75276c6c2066696e640d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a492073686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a4e6576657220776974686f757420796f7572206c6f76650d0a0d0a546f6e6967687420746865206d75736963207365656d7320736f206c6f75640d0a492077697368207468617420776520636f756c64206c6f736520746869732063726f77640d0a4d617962652069742773206265747465722074686973207761790d0a5765276420687572742065616368206f74686572207769746820746865207468696e677320776527642077616e7420746f207361790d0a0d0a576520636f756c642068617665206265656e20736f20676f6f6420746f6765746865720d0a576520636f756c642068617665206c6976656420746869732064616e636520666f72657665720d0a427574206e6f772077686f277320676f6e6e612064616e63652077697468206d650d0a506c6561736520737461790d0a0d0a416e642049276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a284e6f77207468617420796f7527726520676f6e6529204e6f77207468617420796f7527726520676f6e650d0a284e6f77207468617420796f7527726520676f6e65292057686174204920646964277320736f2077726f6e672c20736f2077726f6e670d0a5468617420796f752068616420746f206c65617665206d6520616c6f6e65", "careless whisper"},
	{NULL}
};

static char (*saved_key) [PLAINTEXT_LENGTH + 1];
static int any_cracked, *cracked;
static size_t cracked_size;

/* Parsed per-hash salt; compared via dyna_salt over [salt..wrappedkey]. */
static struct custom_salt {
	dyna_salt dsalt;
	int version;
	uint32_t key_wrapping_rounds;
	unsigned char salt[16];
	unsigned char wrappedkey[24];
	char* keyfile;              /* optional decoded key-file bytes, or NULL */
} *cur_salt;

/* Allocate per-thread key/result buffers, scaled for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = 1;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc(cracked_size, 1);
}

static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

/* Syntax check of a candidate hash line; returns 1 iff well-formed. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;

	if (strncmp(ciphertext, "$axcrypt$*", 10) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 10;	/* skip over "$axcrypt$*" */
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* version */
		goto err;
	if (!isdec(p))
		goto err;
	if (!atoi(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if (!atoi(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (strlen(p) != 32 || !ishexlc(p))	/* 16 bytes, lowercase hex */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* wrappedkey */
		goto err;
	if (strlen(p) != 48 || !ishexlc(p))	/* 24 bytes, lowercase hex */
		goto err;
	/* optional key-file following */
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Decode the hash line into a custom_salt (hex fields -> bytes). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	static void *ptr;

	cs.keyfile = NULL;

	ctcopy += 10;	/* skip over "$axcrypt$*" */
	p = strtokm(ctcopy, "*");
	cs.version = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_wrapping_rounds = (uint32_t) atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < 16; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < 24; i++)
		cs.wrappedkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	/* if key-file present */
	if ((p = strtokm(NULL, "*")) != NULL){
		cs.keyfile = (char*) mem_calloc_tiny(strlen(p)/2 + 1, sizeof(char));
		/* NOTE(review): strlen(p) is re-evaluated every iteration and
		   compared against signed i — O(n^2) for long key-files; works,
		   but hoisting the length would be cheaper. */
		for (i = 0; i < strlen(p)/2; i++)
			cs.keyfile[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	}
	MEM_FREE(keeptr);

	/* dyna_salt compares only [salt .. wrappedkey] for salt equality */
	cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, salt);
	cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, salt, wrappedkey, 0);
	cs.dsalt.salt_alloc_needs_free = 0;

	ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	memcpy(ptr, &cs, sizeof(struct custom_salt));
	return (void *) &ptr;
}

static void set_salt(void *salt)
{
	cur_salt = *(struct custom_salt **) salt;
}

/* Try every queued password against cur_salt: derive KEK, run the
   iterated AES un-wrap, and accept when the IV check passes. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		/* NUMBER_AES_BLOCKS = 2
		   AES_BLOCK_SIZE = 16 */
		unsigned char KEK[20], lsb[24], cipher[16];

		AES_KEY akey;
		SHA_CTX ctx;

		int i, j, nb_iterations = cur_salt->key_wrapping_rounds;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char *) saved_key[index], strlen(saved_key[index]));
		/* if key-file provided */
		if (cur_salt->keyfile != NULL)
			SHA1_Update(&ctx, (unsigned char *) cur_salt->keyfile, strlen(cur_salt->keyfile));
		SHA1_Final( KEK, &ctx );

		/* hash XOR salt => KEK */
		for (i = 0; i < sizeof(cur_salt->salt); i++)
			KEK[i] ^= cur_salt->salt[i];

		memcpy(lsb, cur_salt->wrappedkey + 8, 16);

		memset(&akey, 0, sizeof(AES_KEY));
		AES_set_decrypt_key(KEK, 128, &akey);

		/* set msb */
		memcpy(cipher, cur_salt->wrappedkey, 8);

		/* custom AES un-wrapping loop */
		for (j = nb_iterations - 1; j >= 0; j--) {
			/* 1st block treatment */
			/* MSB XOR (NUMBER_AES_BLOCKS * j + i) */
			PUT_64BITS_XOR_MSB(cipher, 2 * j + 2);
			/* R[i] */
			memcpy(cipher + 8, lsb + 8, 8);
			/* AES_ECB(KEK, (MSB XOR (NUMBER_AES_BLOCKS * j + i)) | R[i]) */
			AES_decrypt(cipher, cipher, &akey);
			memcpy(lsb + 8, cipher + 8, 8);

			/* 2nd block treatment */
			PUT_64BITS_XOR_MSB(cipher, 2 * j + 1);
			memcpy(cipher + 8, lsb, 8);
			AES_decrypt(cipher, cipher, &akey);
			memcpy(lsb, cipher + 8, 8);
		}
		/* correct password iff recovered MSB == fixed wrapping IV */
		if (!memcmp(cipher, AES_WRAPPING_IV, 8)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return cracked[index];
}

static void axcrypt_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_axcrypt = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
		{ NULL },
		axcrypt_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_dyna_salt_hash,
		NULL,
		set_salt,
		axcrypt_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
thread-limit-3.c
#include <stdlib.h>
#include <omp.h>

/* OpenMP conformance test: a `teams` construct with thread_limit(1),
   nested in a `target` whose if(0) clause forces host execution, must
   report omp_get_thread_limit() == 1 inside the region; abort otherwise. */
int
main ()
{
#pragma omp target if (0)
#pragma omp teams thread_limit (1)
  if (omp_get_thread_limit () != 1)
    abort ();
  return 0;
}
bml_norm_ellpack_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_norm.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_norm_ellpack.h"
#include "bml_types_ellpack.h"

#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Calculate the sum of squares of the elements of a matrix.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \return The sum of squares of A
 */
double TYPED_FUNC(
    bml_sum_squares_ellpack) (
    bml_matrix_ellpack_t * A)
{
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;

    /* Only this rank's row slice [rowMin, rowMax) is accumulated. */
    int myRank = bml_getMyRank();
    int rowMin = A_localRowMin[myRank];
    int rowMax = A_localRowMax[myRank];

#ifdef USE_OMP_OFFLOAD
//#pragma omp target map(tofrom:sum)
#pragma omp target update from(A_nnz[:N], A_value[:N*M])
#endif
#pragma omp parallel for \
    shared(N, M, A_value, A_nnz) \
    shared(rowMin, rowMax) \
    reduction(+:sum)
    for (int i = rowMin; i < rowMax; i++)
    {
        for (int j = 0; j < A_nnz[i]; j++)
        {
            REAL_T xval = A_value[ROWMAJOR(i, j, N, M)];
            sum += xval * xval;
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the sum of squares of all the core elements of a submatrix.
 *
 * \ingroup norm_group
 *
 * \param A The matrix
 * \param core_size Number of core rows
 * \return The sum of squares of the core_size x core_size leading block of A
 */
double TYPED_FUNC(
    bml_sum_squares_submatrix_ellpack) (
    bml_matrix_ellpack_t * A,
    int core_size)
{
    int N = A->N;
    int M = A->M;
    int *A_index = (int *) A->index;
    int *A_nnz = (int *) A->nnz;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;

#ifdef USE_OMP_OFFLOAD
//#pragma omp target map(tofrom:sum)
#pragma omp target update from(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#endif
#pragma omp parallel for \
    reduction(+:sum)
    for (int i = 0; i < core_size; i++)
    {
        for (int j = 0; j < A_nnz[i]; j++)
        {
            /* Only columns inside the core block contribute. */
            if (A_index[ROWMAJOR(i, j, N, M)] < core_size)
            {
                REAL_T value = A_value[ROWMAJOR(i, j, N, M)];
                sum += value * value;
            }
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the sum of the elements of \alpha A(i,j) * B(i,j).
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \param B The matrix B
 * \param alpha Multiplier for A
 * \param threshold Threshold
 * \return The sum of the elements of \alpha A(i,j) * B(i,j)
 */
double TYPED_FUNC(
    bml_sum_AB_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    double alpha,
    double threshold)
{
    int A_N = A->N;
    int A_M = A->M;
    int B_N = B->N;
    int B_M = B->M;

    int *A_index = (int *) A->index;
    int *A_nnz = (int *) A->nnz;
    int *B_index = (int *) B->index;
    int *B_nnz = (int *) B->nnz;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;
    REAL_T alpha_ = (REAL_T) alpha;

    int myRank = bml_getMyRank();
    int rowMin = A_localRowMin[myRank];
    int rowMax = A_localRowMax[myRank];

    /* Dense scatter workspace per row: y accumulates values, ix marks
       occupied columns, jjb records which columns were touched.
       NOTE(review): these are VLAs of size A_N copied firstprivate into
       each thread on non-IBM compilers — large N may stress the stack. */
#if !(defined(__IBMC__) || defined(__ibmxl__))
    REAL_T y[A_N];
    int ix[A_N], jjb[A_N];

    memset(y, 0, A_N * sizeof(REAL_T));
    memset(ix, 0, A_N * sizeof(int));
    memset(jjb, 0, A_N * sizeof(int));
#endif

#ifdef USE_OMP_OFFLOAD
//#pragma omp target map(tofrom:sum)
#pragma omp target update from(A_nnz[:A_N], A_index[:A_N*A_M], A_value[:A_N*A_M])
#pragma omp target update from(B_nnz[:B_N], B_index[:B_N*B_M], B_value[:B_N*B_M])
#endif
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(alpha_) \
    shared(A_N, A_M, A_index, A_nnz, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_N, B_M, B_index, B_nnz, B_value) \
    reduction(+:sum)
#else
#pragma omp parallel for \
    shared(alpha_) \
    shared(A_N, A_M, A_index, A_nnz, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_N, B_M, B_index, B_nnz, B_value) \
    firstprivate(ix, jjb, y) \
    reduction(+:sum)
#endif
    //for (int i = 0; i < A_N; i++)
    for (int i = rowMin; i < rowMax; i++)
    {
#if defined(__IBMC__) || defined(__ibmxl__)
        REAL_T y[A_N];
        int ix[A_N], jjb[A_N];

        memset(ix, 0, A_N * sizeof(int));
#endif
        int l = 0;
        /* Scatter alpha * A(i,:) into y. */
        for (int jp = 0; jp < A_nnz[i]; jp++)
        {
            int k = A_index[ROWMAJOR(i, jp, A_N, A_M)];
            if (ix[k] == 0)
            {
                y[k] = 0.0;
                ix[k] = i + 1;
                jjb[l] = k;
                l++;
            }
            y[k] += alpha_ * A_value[ROWMAJOR(i, jp, A_N, A_M)];
        }
        /* Multiply elementwise by B(i,:); columns absent from A stay 0. */
        for (int jp = 0; jp < B_nnz[i]; jp++)
        {
            int k = B_index[ROWMAJOR(i, jp, B_N, B_M)];
            if (ix[k] == 0)
            {
                y[k] = 0.0;
                ix[k] = i + 1;
                jjb[l] = k;
                l++;
            }
            y[k] *= B_value[ROWMAJOR(i, jp, B_N, B_M)];
        }
        /* Accumulate products above threshold and reset the workspace. */
        for (int jp = 0; jp < l; jp++)
        {
            if (ABS(y[jjb[jp]]) > threshold)
                sum += y[jjb[jp]];
            ix[jjb[jp]] = 0;
            y[jjb[jp]] = 0.0;
            jjb[jp] = 0;
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the sum of squares of the elements of \alpha A + \beta B.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \param B The matrix B
 * \param alpha Multiplier for A
 * \param beta Multiplier for B
 * \param threshold Threshold
 * \return The sum of squares of \alpha A + \beta B
 */
double TYPED_FUNC(
    bml_sum_squares2_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    double alpha,
    double beta,
    double threshold)
{
    int A_N = A->N;
    int A_M = A->M;
    int B_N = B->N;
    int B_M = B->M;

    int *A_index = (int *) A->index;
    int *A_nnz = (int *) A->nnz;
    int *B_index = (int *) B->index;
    int *B_nnz = (int *) B->nnz;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;
    REAL_T alpha_ = (REAL_T) alpha;
    REAL_T beta_ = (REAL_T) beta;

    int myRank = bml_getMyRank();
    int rowMin = A_localRowMin[myRank];
    int rowMax = A_localRowMax[myRank];

    /* Dense scatter workspace per row (see bml_sum_AB_ellpack). */
#if !(defined(__IBMC__) || defined(__ibmxl__))
    REAL_T y[A_N];
    int ix[A_N], jjb[A_N];

    memset(y, 0, A_N * sizeof(REAL_T));
    memset(ix, 0, A_N * sizeof(int));
    memset(jjb, 0, A_N * sizeof(int));
#endif

#ifdef USE_OMP_OFFLOAD
//#pragma omp target map(tofrom:sum)
#pragma omp target update from(A_nnz[:A_N], A_index[:A_N*A_M], A_value[:A_N*A_M])
#pragma omp target update from(B_nnz[:B_N], B_index[:B_N*B_M], B_value[:B_N*B_M])
#endif
#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(alpha_, beta_) \
    shared(A_N, A_M, A_index, A_nnz, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_N, B_M, B_index, B_nnz, B_value) \
    reduction(+:sum)
#else
#pragma omp parallel for \
    shared(alpha_, beta_) \
    shared(A_N, A_M, A_index, A_nnz, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_N, B_M, B_index, B_nnz, B_value) \
    firstprivate(ix, jjb, y) \
    reduction(+:sum)
#endif
    //for (int i = 0; i < A_N; i++)
    for (int i = rowMin; i < rowMax; i++)
    {
#if defined(__IBMC__) || defined(__ibmxl__)
        REAL_T y[A_N];
        int ix[A_N], jjb[A_N];

        memset(ix, 0, A_N * sizeof(int));
#endif
        int l = 0;
        /* y = alpha * A(i,:) ... */
        for (int jp = 0; jp < A_nnz[i]; jp++)
        {
            int k = A_index[ROWMAJOR(i, jp, A_N, A_M)];
            if (ix[k] == 0)
            {
                y[k] = 0.0;
                ix[k] = i + 1;
                jjb[l] = k;
                l++;
            }
            y[k] += alpha_ * A_value[ROWMAJOR(i, jp, A_N, A_M)];
        }
        /* ... + beta * B(i,:) */
        for (int jp = 0; jp < B_nnz[i]; jp++)
        {
            int k = B_index[ROWMAJOR(i, jp, B_N, B_M)];
            if (ix[k] == 0)
            {
                y[k] = 0.0;
                ix[k] = i + 1;
                jjb[l] = k;
                l++;
            }
            y[k] += beta_ * B_value[ROWMAJOR(i, jp, B_N, B_M)];
        }
        /* Square, threshold, and reset workspace. */
        for (int jp = 0; jp < l; jp++)
        {
            if (ABS(y[jjb[jp]]) > threshold)
                sum += y[jjb[jp]] * y[jjb[jp]];
            ix[jjb[jp]] = 0;
            y[jjb[jp]] = 0.0;
            jjb[jp] = 0;
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the Frobenius norm of matrix A.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \return The Frobenius norm of A
 */
double TYPED_FUNC(
    bml_fnorm_ellpack) (
    bml_matrix_ellpack_t * A)
{
    double fnorm = TYPED_FUNC(bml_sum_squares_ellpack) (A);

#ifdef DO_MPI
    /* Combine per-rank partial sums before the square root. */
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_sumRealReduce(&fnorm);
    }
#endif

    fnorm = sqrt(fnorm);

    return (double) REAL_PART(fnorm);
}

/** Calculate the Frobenius norm of the difference of 2 matrices.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \param B The matrix B
 * \return The Frobenius norm of A-B
 */
double TYPED_FUNC(
    bml_fnorm2_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B)
{
    int N = A->N;
    int M = A->M;

    double fnorm = 0.0;
    REAL_T rvalue;

    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    REAL_T *A_value = (REAL_T *) A->value;

    int *B_nnz = (int *) B->nnz;
    int *B_index = (int *) B->index;
    REAL_T *B_value = (REAL_T *) B->value;

    REAL_T temp;

    int myRank = bml_getMyRank();
    int rowMin = A_localRowMin[myRank];
    int rowMax = A_localRowMax[myRank];

#ifdef USE_OMP_OFFLOAD
//#pragma omp target map(tofrom:fnorm)
#pragma omp target update from(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#endif
#pragma omp parallel for \
    private(rvalue, temp) \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_nnz, B_index, B_value) \
    reduction(+:fnorm)
    //for (int i = 0; i < N; i++)
    for (int i = rowMin; i < rowMax; i++)
    {
        /* Entries present in A: subtract B's matching value (0 if absent). */
        for (int j = 0; j < A_nnz[i]; j++)
        {
            /* FIX: initialize before the search loop — previously rvalue
               was only assigned inside the loop, so B_nnz[i] == 0 read a
               stale/uninitialized value. */
            rvalue = 0.0;
            for (int k = 0; k < B_nnz[i]; k++)
            {
                if (A_index[ROWMAJOR(i, j, N, M)] == B_index[ROWMAJOR(i, k, N, M)])
                {
                    rvalue = B_value[ROWMAJOR(i, k, N, M)];
                    break;
                }
            }
            temp = A_value[ROWMAJOR(i, j, N, M)] - rvalue;
            fnorm += temp * temp;
        }
        /* Entries present only in B (no match in A) contribute B^2.
           NOTE(review): an explicit 0.0 stored in A is treated as absent
           here, double-counting B's entry — preserved existing behavior. */
        for (int j = 0; j < B_nnz[i]; j++)
        {
            rvalue = 0.0;       /* FIX: same stale-read issue as above */
            for (int k = 0; k < A_nnz[i]; k++)
            {
                if (A_index[ROWMAJOR(i, k, N, M)] == B_index[ROWMAJOR(i, j, N, M)])
                {
                    rvalue = A_value[ROWMAJOR(i, k, N, M)];
                    break;
                }
            }
            if (rvalue == 0.0)
            {
                temp = B_value[ROWMAJOR(i, j, N, M)];
                fnorm += temp * temp;
            }
        }
    }

#ifdef DO_MPI
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_sumRealReduce(&fnorm);
    }
#endif

    fnorm = sqrt(fnorm);

    return (double) REAL_PART(fnorm);
}
GB_unop__abs_bool_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__abs_bool_bool)
// op(A') function:  GB (_unop_tran__abs_bool_bool)

// C type:   bool
// A type:   bool
// cast:     bool cij = aij
// unaryop:  cij = aij

// abs on bool is the identity, so both the cast and the operator reduce to
// a plain copy of aij into cij.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    bool aij = Ax [pA] ;            \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = aij ;                  \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__abs_bool_bool)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse/hypersparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__abs_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion; it uses the
    // GB_CAST_OP macro defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
random.c
/******************************************************************************
 *                                                                            *
 * RANDOM.C                                                                   *
 *                                                                            *
 * WRAPPERS FOR RANDOM NUMBER GENERATOR                                       *
 *                                                                            *
 ******************************************************************************/

#include "decs.h"

/* One Mersenne-twister generator per OpenMP thread: draws are lock-free and
 * reproducible for a fixed seed and thread count. */
static gsl_rng **rng;

/* Allocate and seed the per-thread generators; thread t is seeded with
 * seed + t.
 * NOTE(review): assumes the global nthreads matches the actual OpenMP team
 * size, otherwise rng[] could be indexed out of bounds - verify. */
void init_random(int seed)
{
  rng = safe_malloc(nthreads * sizeof(gsl_rng *));
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    rng[tid] = gsl_rng_alloc(gsl_rng_mt19937);
    gsl_rng_set(rng[tid], seed + tid);
  }
}

/* Uniform deviate in [0, 1) from the calling thread's generator. */
double get_rand()
{
  return gsl_rng_uniform(rng[omp_get_thread_num()]);
}

/* Chi-squared deviate with nu degrees of freedom. */
double get_chisq(double nu)
{
  return gsl_ran_chisq(rng[omp_get_thread_num()], nu);
}

/* Isotropic random unit vector in 3D, returned through (nx, ny, nz). */
void get_ran_dir_3d(double *nx, double *ny, double *nz)
{
  gsl_ran_dir_3d(rng[omp_get_thread_num()], nx, ny, nz);
}

/* Gaussian deviate with mean mu and standard deviation sigma. */
double get_gaussian(double mu, double sigma)
{
  return mu + gsl_ran_gaussian(rng[omp_get_thread_num()], sigma);
}
fs_csc_executor.h
/* ****** Parallel outer loop implementation using level set */
/*
 * Solve the sparse lower-triangular system L*x = b in place, where L is
 * stored in CSC format (Lp column pointers, Li row indices, Lx values, with
 * the diagonal entry first in each column) and x holds b on entry and the
 * solution on exit.  Columns are processed level by level: all columns in
 * one level are independent and may run in parallel; cross-column updates
 * are protected with an atomic.
 *
 *   n        - matrix dimension (unused here; kept for interface parity)
 *   levels   - number of level sets
 *   levelPtr - levelPtr[l]..levelPtr[l+1]-1 indexes levelSet for level l
 *   levelSet - column indices, grouped by level
 *   chunk    - scheduling hint (unused; schedule(auto) is applied)
 *
 * Returns 1 on success, 0 on invalid (NULL) inputs.
 */
int fs_csc_executor(int n, int *Lp, int *Li, double *Lx, double *x,
                    int levels, int *levelPtr, int *levelSet, int chunk)
{
    /* check ALL arrays that are dereferenced below (the original guard
     * omitted Lx, levelPtr and levelSet) */
    if (!Lp || !Li || !Lx || !x || !levelPtr || !levelSet)
        return (0);
    for (int l = 0; l < levels; ++l)
    {
        int li = 0;
#pragma omp parallel for \
        default(shared) private(li)  \
        schedule(auto)
        for (li = levelPtr[l]; li < levelPtr[l + 1]; ++li)
        {
            int j = levelSet[li];
            /* divide by the diagonal (first entry of column j) */
            x[j] /= Lx[Lp[j]];
            /* scatter the update of column j into the remaining rows */
            for (int p = Lp[j] + 1; p < Lp[j + 1]; p++)
            {
                double tmp = Lx[p] * x[j];
                int idx = Li[p];
#pragma omp atomic
                x[idx] -= tmp;
            }
        }
    }
    return (1);
}

/* ****** Parallel outer loop implementation using ParSy paper (LBC) */
/*
 * Same triangular solve as fs_csc_executor, but the work inside each level
 * is grouped into coarser partitions (load-balanced coarsening): level i1
 * spans partitions levelPtr[i1]..levelPtr[i1+1]-1, and partition j1 owns
 * the columns partition[parPtr[j1]..parPtr[j1+1]-1].
 *
 * Returns 1 on success, 0 on invalid (NULL) inputs.
 */
int fs_csc_executor_H2(int n, int *Lp, int *Li, double *Lx, double *x,
                       int levels, int *levelPtr, int *levelSet,
                       int parts, int *parPtr, int *partition, int chunk)
{
    /* validate every array dereferenced below */
    if (!Lp || !Li || !Lx || !x || !levelPtr || !parPtr || !partition)
        return (0);
    for (int i1 = 0; i1 < levels; ++i1)
    {
#pragma omp parallel //shared(lValues)//private(map, contribs)
        {
#pragma omp for schedule(auto)
            for (int j1 = levelPtr[i1]; j1 < levelPtr[i1 + 1]; ++j1)
            {
                /* columns of one partition run sequentially on one thread */
                for (int k1 = parPtr[j1]; k1 < parPtr[j1 + 1]; ++k1)
                {
                    int j = partition[k1];
                    x[j] /= Lx[Lp[j]];
                    // #pragma omp critical
                    for (int p = Lp[j] + 1; p < Lp[j + 1]; p++)
                    {
                        double tmp = Lx[p] * x[j];
                        int idx = Li[p];
#pragma omp atomic
                        x[idx] -= tmp;
                    }
                }
            }
        }
    }
    return (1);
}
csr.c
/*!
 * \file
 *
 * \brief Various routines for dealing with CSR matrices
 *
 * \author George Karypis
 * \version\verbatim $Id: csr.c 16297 2014-02-24 20:36:56Z karypis $ \endverbatim
 */

#include <GKlib.h>

/* minimum amount of work before OpenMP parallelization is worthwhile */
#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
    \returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat;

  mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}

/*************************************************************************/
/*! Initializes the matrix
    \param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  /* zero all pointers/fields, then mark the dimensions as "unset" */
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = mat->ncols = -1;
}

/*************************************************************************/
/*! Frees all the memory allocated for matrix.
    \param mat is the matrix to be freed; *mat is set to NULL on return.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat == NULL)
    return;
  gk_csr_FreeContents(*mat);
  gk_free((void **)mat, LTERM);
}

/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
    sets them to NULL.
    \param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
  /* gk_free is variadic and NULLs each field; the LTERM sentinel ends the
     argument list */
  gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}

/*************************************************************************/
/*! Returns a copy of a matrix.
    \param mat is the matrix to be duplicated.
    \returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows  = mat->nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure (only the fields that are populated) */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure (only the fields that are populated) */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}

/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
    \param mat is the original matrix.
    \param rstart is the starting row.
    \param nrows is the number of rows from rstart to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows  = nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                              gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
  /* rebase the copied pointers so rowptr[0] == 0; iterate downwards since
     rowptr[0] must be adjusted last */
  for (i=nrows; i>=0; i--)
    nmat->rowptr[i] -= nmat->rowptr[0];
  ASSERT(nmat->rowptr[0] == 0);

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));

  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}

/*************************************************************************/
/*!
  Returns a submatrix containing a certain set of rows.
  \param mat is the original matrix.
  \param nrows is the number of rows to extract.
  \param rind is the set of row numbers to extract.
  \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* first pass: count the nonzeros of the selected rows */
  for (nnz=0, i=0; i<nrows; i++)
    nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  /* second pass: copy the selected rows, in the order given by rind */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
    gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
    nmat->rowptr[++j] = nnz;
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}

/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
  \param mat is the original matrix.
  \param part is the partitioning vector of the rows.
  \param pid is the partition ID that will be extracted.
  \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = 0;
  nmat->ncols = mat->ncols;

  /* first pass: count the rows/nonzeros belonging to partition pid */
  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      nmat->nrows++;
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
    }
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  /* second pass: copy the selected rows, preserving their relative order */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
      gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
      nmat->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}

/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
    color array.
    \param mat is the original matrix.
    \param color is an array of size equal to the number of non-zeros
           in the matrix (row-wise structure). The matrix is split into
           as many parts as the number of colors. For meaningful results,
           the colors should be numbered consecutively starting from 0.
    \returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* number of output matrices = max color + 1 */
  ncolors = gk_imax(rowptr[nrows], color, 1)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  /* count the per-color nonzeros of each row */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  /* turn the counts into rowptr offsets */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  /* scatter each nonzero into its color's matrix (advances rowptr) */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  /* undo the rowptr advancement done during the scatter */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}

/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
    forward structure.
    \param filename is the file that stores the data.
    \param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
           GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
           specifying the type of the input format.
           The GK_CSR_FMT_CSR does not contain a header
           line, whereas the GK_CSR_FMT_BINROW is a binary format written
           by gk_csr_Write() using the same format specifier.
    \param readvals is either 1 or 0, indicating if the CSR file contains
           values or it does not. It only applies when GK_CSR_FMT_CSR is
           used.
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices start from 1 or 0, respectively. If they start from 1,
           they are automatically decremented during input so that they
           will start from 0. It only applies when GK_CSR_FMT_CSR is
           used.
    \returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
  ssize_t i, k, l;
  size_t nfields, nrows, ncols, nnz, fmt, ncon;
  size_t lnlen;
  ssize_t *rowptr;
  int *rowind, *iinds, *jinds, ival;
  float *rowval=NULL, *vals, fval;
  int readsizes, readwgts;
  char *line=NULL, *head, *tail, fmtstr[256];
  FILE *fpin;
  gk_csr_t *mat=NULL;

  if (!gk_fexists(filename))
    gk_errexit(SIGERR, "File %s does not exist!\n", filename);

  switch (format) {
    /* binary row-major dump: nrows, ncols, rowptr, rowind[, rowval] */
    case GK_CSR_FMT_BINROW:
      mat = gk_csr_Create();

      fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
      if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
      if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
      mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
      if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
        gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
      mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
      if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
        gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
      if (readvals == 1) {
        mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
        if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
          gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
      }
      gk_fclose(fpin);
      return mat;

      break;

    /* binary column-major dump: nrows, ncols, colptr, colind[, colval] */
    case GK_CSR_FMT_BINCOL:
      mat = gk_csr_Create();

      fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
      if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
      if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
      mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
      if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
        gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
      mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
      if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
        gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
      if (readvals) {
        mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
        if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
          gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
      }
      gk_fclose(fpin);
      return mat;

      break;

    /* text triplets: one "i j [val]" per line */
    case GK_CSR_FMT_IJV:
      /* nrows here is the number of lines, nnz the number of tokens */
      gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

      if (readvals == 1 && 3*nrows != nnz)
        gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 3.\n", nnz, readvals);
      if (readvals == 0 && 2*nrows != nnz)
        gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 2.\n", nnz, readvals);

      nnz = nrows;
      numbering = (numbering ? - 1 : 0);

      /* read the data into three arrays */
      iinds = gk_i32malloc(nnz, "iinds");
      jinds = gk_i32malloc(nnz, "jinds");
      vals  = (readvals ? gk_fmalloc(nnz, "vals") : NULL);

      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
      for (nrows=0, ncols=0, i=0; i<nnz; i++) {
        if (readvals) {
          if (fscanf(fpin, "%d %d %f", &iinds[i], &jinds[i], &vals[i]) != 3)
            gk_errexit(SIGERR, "Error: Failed to read (i, j, val) for nnz: %zd.\n", i);
        }
        else {
          if (fscanf(fpin, "%d %d", &iinds[i], &jinds[i]) != 2)
            gk_errexit(SIGERR, "Error: Failed to read (i, j) value for nnz: %zd.\n", i);
        }
        iinds[i] += numbering;
        jinds[i] += numbering;

        /* the dimensions are the largest indices encountered */
        if (nrows < iinds[i])
          nrows = iinds[i];
        if (ncols < jinds[i])
          ncols = jinds[i];
      }
      nrows++;
      ncols++;
      gk_fclose(fpin);

      /* convert (i, j, v) into a CSR matrix */
      mat = gk_csr_Create();
      mat->nrows = nrows;
      mat->ncols = ncols;
      rowptr = mat->rowptr = gk_zsmalloc(nrows+1, 0, "rowptr");
      rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
      if (readvals)
        rowval = mat->rowval = gk_fmalloc(nnz, "rowval");

      for (i=0; i<nnz; i++)
        rowptr[iinds[i]]++;
      MAKECSR(i, nrows, rowptr);

      for (i=0; i<nnz; i++) {
        rowind[rowptr[iinds[i]]] = jinds[i];
        if (readvals)
          rowval[rowptr[iinds[i]]] = vals[i];
        rowptr[iinds[i]]++;
      }
      SHIFTCSR(i, nrows, rowptr);

      gk_free((void **)&iinds, &jinds, &vals, LTERM);

      return mat;

      break;

    /* binary triplets with a header carrying dimensions and value flag */
    case GK_CSR_FMT_BIJV:
      mat = gk_csr_Create();

      fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");

      if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
      if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
      if (fread(&nnz, sizeof(size_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the nnz from file %s!\n", filename);
      /* the value flag in the file overrides the readvals argument */
      if (fread(&readvals, sizeof(int32_t), 1, fpin) != 1)
        gk_errexit(SIGERR, "Failed to read the readvals from file %s!\n", filename);

      /* read the data into three arrays */
      iinds = gk_i32malloc(nnz, "iinds");
      jinds = gk_i32malloc(nnz, "jinds");
      vals  = (readvals ? gk_fmalloc(nnz, "vals") : NULL);

      for (i=0; i<nnz; i++) {
        if (fread(&(iinds[i]), sizeof(int32_t), 1, fpin) != 1)
          gk_errexit(SIGERR, "Failed to read iinds[i] from file %s!\n", filename);
        if (fread(&(jinds[i]), sizeof(int32_t), 1, fpin) != 1)
          gk_errexit(SIGERR, "Failed to read jinds[i] from file %s!\n", filename);
        if (readvals) {
          if (fread(&(vals[i]), sizeof(float), 1, fpin) != 1)
            gk_errexit(SIGERR, "Failed to read vals[i] from file %s!\n", filename);
        }
        //printf("%d %d\n", iinds[i], jinds[i]);
      }
      gk_fclose(fpin);

      /* convert (i, j, v) into a CSR matrix */
      rowptr = mat->rowptr = gk_zsmalloc(mat->nrows+1, 0, "rowptr");
      rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
      if (readvals)
        rowval = mat->rowval = gk_fmalloc(nnz, "rowval");

      for (i=0; i<nnz; i++)
        rowptr[iinds[i]]++;
      MAKECSR(i, mat->nrows, rowptr);

      for (i=0; i<nnz; i++) {
        rowind[rowptr[iinds[i]]] = jinds[i];
        if (readvals)
          rowval[rowptr[iinds[i]]] = vals[i];
        rowptr[iinds[i]]++;
      }
      SHIFTCSR(i, mat->nrows, rowptr);

      gk_free((void **)&iinds, &jinds, &vals, LTERM);

      return mat;

      break;

    /* the following are handled by a common input code, that comes after
       the switch */
    case GK_CSR_FMT_CLUTO:
      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
      /* skip comment lines before the header */
      do {
        if (gk_getline(&line, &lnlen, fpin) <= 0)
          gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
      } while (line[0] == '%');

      if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
        gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

      readsizes = 0;
      readwgts  = 0;
      readvals  = 1;
      numbering = 1;

      break;

    case GK_CSR_FMT_METIS:
      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
      /* skip comment lines before the header */
      do {
        if (gk_getline(&line, &lnlen, fpin) <= 0)
          gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
      } while (line[0] == '%');

      fmt = ncon = 0;
      nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
      if (nfields < 2)
        gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

      ncols = nrows;
      nnz *= 2;       /* each undirected edge is stored twice */

      if (fmt > 111)
        gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

      /* decode the three-digit fmt flags: sizes/weights/values */
      sprintf(fmtstr, "%03zu", fmt%1000);
      readsizes = (fmtstr[0] == '1');
      readwgts  = (fmtstr[1] == '1');
      readvals  = (fmtstr[2] == '1');
      numbering = 1;
      ncon      = (ncon == 0 ? 1 : ncon);

      break;

    case GK_CSR_FMT_CSR:
      readsizes = 0;
      readwgts  = 0;

      /* nrows here is the number of lines, nnz the number of tokens */
      gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

      if (readvals == 1 && nnz%2 == 1)
        gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
      if (readvals == 1)
        nnz = nnz/2;
      fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");

      break;

    default:
      gk_errexit(SIGERR, "Unknown csr format.\n");
      return NULL;
  }

  mat = gk_csr_Create();

  mat->nrows = nrows;

  rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
  rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
  if (readvals != 2)
    rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");

  if (readsizes)
    mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");

  if (readwgts)
    mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");

  /*----------------------------------------------------------------------
   * Read the sparse matrix file
   *---------------------------------------------------------------------*/
  numbering = (numbering ? -1 : 0);
  for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
#ifdef __MSC__
      mat->rsizes[i] = (float)strtod(head, &tail);
#else
      mat->rsizes[i] = strtof(head, &tail);
#endif
      if (tail == head)
        gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
      if (mat->rsizes[i] < 0)
        errexit("The size for vertex %zd must be >= 0\n", i+1);
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l<ncon; l++) {
#ifdef __MSC__
        mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
        mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
        if (tail == head)
          errexit("The line for vertex %zd does not have enough weights "
                  "for the %d constraints.\n", i+1, ncon);
        if (mat->rwgts[i*ncon+l] < 0)
          errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        head = tail;
      }
    }

    /* Read the rest of the row */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;
      head = tail;

      if ((rowind[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      ncols = gk_max(rowind[k], ncols);

      if (readvals == 1) {
#ifdef __MSC__
        fval = (float)strtod(head, &tail);
#else
        fval = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
        head = tail;

        rowval[k] = fval;
      }
      k++;
    }
    rowptr[i+1] = k;
  }

  if (format == GK_CSR_FMT_METIS) {
    ASSERT(ncols+1 == mat->nrows);
    mat->ncols = mat->nrows;
  }
  else {
    mat->ncols = ncols+1;
  }

  if (k != nnz)
    gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
                       "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

  gk_fclose(fpin);

  gk_free((void **)&line, LTERM);

  return mat;
}

/**************************************************************************/
/*!
  Writes the row-based structure of a matrix into a file.
  \param mat is the matrix to be written,
  \param filename is the name of the output file.
  \param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
         GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL, GK_CSR_FMT_BIJV.
  \param writevals is either 1 or 0 indicating if the values will be
         written or not. This is only applicable when GK_CSR_FMT_CSR
         is used.
  \param numbering is either 1 or 0 indicating if the internal 0-based
         numbering will be shifted by one or not during output. This
         is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  int32_t edge[2];
  FILE *fpout;

  switch (format) {
    case GK_CSR_FMT_METIS:
      /* METIS graphs are square and store each undirected edge twice */
      if (mat->nrows != mat->ncols || mat->rowptr[mat->nrows]%2 == 1)
        gk_errexit(SIGERR, "METIS output format requires a square symmetric matrix.\n");

      if (filename)
        fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
      else
        fpout = stdout;

      fprintf(fpout, "%d %zd\n", mat->nrows, mat->rowptr[mat->nrows]/2);
      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++)
          fprintf(fpout, " %d", mat->rowind[j]+1);
        fprintf(fpout, "\n");
      }
      if (filename)
        gk_fclose(fpout);
      break;

    case GK_CSR_FMT_BINROW:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
      fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
      if (writevals)
        fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

      gk_fclose(fpout);
      return;

      break;

    case GK_CSR_FMT_BINCOL:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
      fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
      if (writevals)
        fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

      gk_fclose(fpout);
      return;

      break;

    case GK_CSR_FMT_IJV:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");

      numbering = (numbering ? 1 : 0);
      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          if (writevals)
            fprintf(fpout, "%zd %d %.8f\n", i+numbering, mat->rowind[j]+numbering, mat->rowval[j]);
          else
            fprintf(fpout, "%zd %d\n", i+numbering, mat->rowind[j]+numbering);
        }
      }

      gk_fclose(fpout);
      return;

      break;

    case GK_CSR_FMT_BIJV:
      if (filename == NULL)
        gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
      fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

      fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
      fwrite(&(mat->rowptr[mat->nrows]), sizeof(size_t), 1, fpout);
      fwrite(&writevals, sizeof(int32_t), 1, fpout);

      for (i=0; i<mat->nrows; i++) {
        edge[0] = i;
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          edge[1] = mat->rowind[j];
          fwrite(edge, sizeof(int32_t), 2, fpout);
          if (writevals)
            fwrite(&(mat->rowval[j]), sizeof(float), 1, fpout);
        }
      }

      gk_fclose(fpout);
      return;

      break;

    default:
      /* GK_CSR_FMT_CLUTO and GK_CSR_FMT_CSR text output */
      if (filename)
        fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
      else
        fpout = stdout;

      if (format == GK_CSR_FMT_CLUTO) {
        fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
        writevals = 1;
        numbering = 1;
      }

      for (i=0; i<mat->nrows; i++) {
        for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
          fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
          if (writevals)
            fprintf(fpout, " %f", mat->rowval[j]);
        }
        fprintf(fpout, "\n");
      }
      if (filename)
        gk_fclose(fpout);
  }
}

/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning takes place
    by analyzing the row structure of the matrix. The pruning takes place
    by removing rows/columns but it does not affect the numbering of the
    remaining rows/columns.

    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param minf is the minimum number of rows (columns) that a column
           (row) must be present in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column
           (row) must be present at in order to be kept.
    \returns the pruned matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* worst-case allocation: nothing gets pruned */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count column occupancies, then turn collen into a keep/drop flag */
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep only the rows whose length is within [minf, maxf]; dropped
         rows become empty but retain their row numbers */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}

/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the highest weight entries whose
    sum accounts for a certain fraction of the overall weight of the
    row/column.

    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param norm indicates the norm that will be used to aggregate the weights
           and possible values are 1 or 2,
    \param fraction is the fraction of the overall norm that will be retained
           by the kept entries.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* worst-case sizing: nothing filtered */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      /* nrowptr starts as a copy of rowptr and is used as a per-row
         scatter cursor while traversing columns; it is compacted and
         shifted back into a proper ptr array afterwards. */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++)
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

      /* NOTE(review): each thread scatters into nrowind/nrowval via the
         shared nrowptr cursors; this assumes distinct columns touch
         distinct row slots concurrently — confirm intended OpenMP safety. */
      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* gather column i into cand[] and accumulate its total norm */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);  /* heaviest entries first */

          /* keep entries until the retained norm exceeds fraction*tsum */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);  /* cursors -> proper ptr array */

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      for (i=0; i<nrows; i++)
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          /* gather row i into cand[] and accumulate its total norm */
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* kept entries are written back in place at the row's start */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;  /* end of row i's kept entries */
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact nrowind/nrowval */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the highest weight top-K entries
    along each row/column and those entries whose weight is greater than
    a specified value.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param topk is the number of the highest weight entries to keep.
    \param keepval is the weight of a term above which will be kept. This
           is used to select additional terms past the first topk.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* worst-case sizing: nothing filtered */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");

      /* nrowptr doubles as a per-row scatter cursor (see LowFilter) */
      gk_zcopy(nrows+1, rowptr, nrowptr);
      for (i=0; i<ncols; i++) {
        /* gather column i and sort by decreasing weight */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* unconditionally keep the top-k entries... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ...plus any further entries whose weight is >= keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the terms whose contribution to
    the total length of the document is greater than a user-supplied
    multiple over the average.

    This routine assumes that the vectors are normalized to be unit length.

    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param zscore is the multiplicative factor over the average contribution
           to the length of the document.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/ /**************************************************************************/ gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore) { ssize_t i, j, nnz; int nrows; ssize_t *rowptr, *nrowptr; int *rowind, *nrowind; float *rowval, *nrowval, avgwgt; gk_csr_t *nmat; nmat = gk_csr_Create(); nmat->nrows = mat->nrows; nmat->ncols = mat->ncols; nrows = mat->nrows; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval"); switch (what) { case GK_CSR_COL: gk_errexit(SIGERR, "This has not been implemented yet.\n"); break; case GK_CSR_ROW: if (mat->rowptr == NULL) gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n"); nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { avgwgt = zscore/(rowptr[i+1]-rowptr[i]); for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] > avgwgt) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; nnz++; } } nrowptr[i+1] = nnz; } break; default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown prunning type of %d\n", what); return NULL; } return nmat; } /*************************************************************************/ /*! Compacts the column-space of the matrix by removing empty columns. As a result of the compaction, the column numbers are renumbered. The compaction operation is done in place and only affects the row-based representation of the matrix. The new columns are ordered in decreasing frequency. \param mat the matrix whose empty columns will be removed. 
*/ /**************************************************************************/ void gk_csr_CompactColumns(gk_csr_t *mat) { ssize_t i; int nrows, ncols, nncols; ssize_t *rowptr; int *rowind, *colmap; gk_ikv_t *clens; nrows = mat->nrows; ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap"); clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens"); for (i=0; i<ncols; i++) { clens[i].key = 0; clens[i].val = i; } for (i=0; i<rowptr[nrows]; i++) clens[rowind[i]].key++; gk_ikvsortd(ncols, clens); for (nncols=0, i=0; i<ncols; i++) { if (clens[i].key > 0) colmap[clens[i].val] = nncols++; else break; } for (i=0; i<rowptr[nrows]; i++) rowind[i] = colmap[rowind[i]]; mat->ncols = nncols; gk_free((void **)&colmap, &clens, LTERM); } /*************************************************************************/ /*! Sorts the indices in increasing order \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of indices to sort. 
*/ /**************************************************************************/ void gk_csr_SortIndices(gk_csr_t *mat, int what) { int n, nn=0; ssize_t *ptr; int *ind; float *val; switch (what) { case GK_CSR_ROW: if (!mat->rowptr) gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n"); n = mat->nrows; ptr = mat->rowptr; ind = mat->rowind; val = mat->rowval; break; case GK_CSR_COL: if (!mat->colptr) gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n"); n = mat->ncols; ptr = mat->colptr; ind = mat->colind; val = mat->colval; break; default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return; } #pragma omp parallel if (n > 100) { ssize_t i, j, k; gk_ikv_t *cand; float *tval; #pragma omp single for (i=0; i<n; i++) nn = gk_max(nn, ptr[i+1]-ptr[i]); cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand"); tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval"); #pragma omp for schedule(static) for (i=0; i<n; i++) { for (k=0, j=ptr[i]; j<ptr[i+1]; j++) { if (j > ptr[i] && ind[j] < ind[j-1]) k = 1; /* an inversion */ cand[j-ptr[i]].val = j-ptr[i]; cand[j-ptr[i]].key = ind[j]; tval[j-ptr[i]] = val[j]; } if (k) { gk_ikvsorti(ptr[i+1]-ptr[i], cand); for (j=ptr[i]; j<ptr[i+1]; j++) { ind[j] = cand[j-ptr[i]].key; val[j] = tval[cand[j-ptr[i]].val]; } } } gk_free((void **)&cand, &tval, LTERM); } } /*************************************************************************/ /*! Creates a row/column index from the column/row data. \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which index will be created. 
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  switch (what) {
    case GK_CSR_COL:
      /* build the column index from the row structure */
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;

      /* discard any stale column index */
      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);

      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    case GK_CSR_ROW:
      /* build the row index from the column structure */
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;

      /* discard any stale row index */
      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* count entries per reverse slot and turn counts into offsets */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  MAKECSR(i, nr, rptr);

  if (rptr[nr] > 6*nr) {
    /* dense case: two separate scatter passes (indices, then values),
       each followed by SHIFTCSR to restore the offsets */
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++)
        rind[rptr[find[j]]++] = i;
    }
    SHIFTCSR(i, nr, rptr);

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    /* sparse case: single combined scatter pass */
    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]]   = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}


/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit length.
    \param mat the matrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm */ /**************************************************************************/ void gk_csr_Normalize(gk_csr_t *mat, int what, int norm) { ssize_t i, j; int n; ssize_t *ptr; float *val, sum; if (what&GK_CSR_ROW && mat->rowval) { n = mat->nrows; ptr = mat->rowptr; val = mat->rowval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){ if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; /* assume val[j] > 0 */ } if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } if (what&GK_CSR_COL && mat->colval) { n = mat->ncols; ptr = mat->colptr; val = mat->colval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++) if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } } /*************************************************************************/ /*! Applies different row scaling methods. \param mat the matrix itself, \param type indicates the type of row scaling. Possible values are: GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2. 
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          /* find the max |TF| of the row, then rescale into [.5, 1] */
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .5 + .5*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .1 + .9*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      /* sign() keeps the original sign while transforming the magnitude */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
          }
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
          }
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
          }
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
          }
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
          }
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        double logscale = 1.0/log(2.0);  /* convert ln to log2 */

        /* flat iteration over all nonzeros (no per-row work needed) */
        #pragma omp for schedule(static,32)
        for (i=0; i<rowptr[nrows]; i++) {
          if (rowval[i] != 0.0)
            rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
        }
#ifdef XXX
        /* retained row-wise variant of the same transform (disabled) */
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
            //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
          }
        }
#endif
      }
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* document frequency of each column */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      /* smoothed IDF: a background frequency (bgfreq) is blended into
         both numerator and denominator */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      nnzcols = 0;
      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static) reduction(+:nnzcols)
        for (i=0; i<ncols; i++)
          nnzcols += (collen[i] > 0 ? 1 : 0);

        /* NOTE(review): bgfreq is a shared variable written here by every
           thread after the reduction barrier; all threads compute the same
           value, but this is formally a data race — confirm intended. */
        bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
        #pragma omp master
        {
          printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);
        }

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}


/*************************************************************************/
/*! Computes the sums of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *sums;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      /* replace any previously cached row sums */
      if (mat->rsums)
        gk_free((void **)&mat->rsums, LTERM);

      sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;

    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      /* replace any previously cached column sums */
      if (mat->csums)
        gk_free((void **)&mat->csums, LTERM);

      sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;

    default:
      gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
      return;
  }

  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           squared norms to compute.
*/ /**************************************************************************/ void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what) { ssize_t i; int n; ssize_t *ptr; float *val, *norms; switch (what) { case GK_CSR_ROW: n = mat->nrows; ptr = mat->rowptr; val = mat->rowval; if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM); norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms"); break; case GK_CSR_COL: n = mat->ncols; ptr = mat->colptr; val = mat->colval; if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM); norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms"); break; default: gk_errexit(SIGERR, "Invalid norm type of %d.\n", what); return; } #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static) for (i=0; i<n; i++) norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1); } /*************************************************************************/ /*! Returns a new matrix whose rows/columns are shuffled. \param mat the matrix to be shuffled, \param what indicates if the rows (GK_CSR_ROW), columns (GK_CSR_COL), or both (GK_CSR_ROWCOL) will be shuffled, \param symmetric indicates if the same shuffling will be applied to both rows and columns. This is valid with nrows==ncols and GK_CSR_ROWCOL was specified. \returns the shuffled matrix. The input matrix is not modified. 
*/
/**************************************************************************/
gk_csr_t *gk_csr_Shuffle(gk_csr_t *mat, int what, int symmetric)
{
  ssize_t i, j;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  int *rperm, *cperm;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  if (what == GK_CSR_ROWCOL && symmetric && mat->nrows != mat->ncols)
    gk_errexit(SIGERR, "The matrix is not square for a symmetric rowcol shuffling.\n");

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  rperm = gk_imalloc(nrows, "gk_csr_Shuffle: rperm");
  cperm = gk_imalloc(ncols, "gk_csr_Shuffle: cperm");

  /* build the row/column permutations; the unshuffled side gets the
     identity permutation. The extra 20 permute passes further mix the
     initial permutation. */
  switch (what) {
    case GK_CSR_ROW:
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(nrows, rperm, 0);

      for (i=0; i<ncols; i++)
        cperm[i] = i;
      break;

    case GK_CSR_COL:
      gk_RandomPermute(ncols, cperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(ncols, cperm, 0);

      for (i=0; i<nrows; i++)
        rperm[i] = i;
      break;

    case GK_CSR_ROWCOL:
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(nrows, rperm, 0);

      if (symmetric)
        gk_icopy(nrows, rperm, cperm);  /* same permutation on both sides */
      else {
        gk_RandomPermute(ncols, cperm, 1);
        for (i=0; i<20; i++)
          gk_RandomPermute(ncols, cperm, 0);
      }
      break;

    default:
      gk_free((void **)&rperm, &cperm, LTERM);
      gk_errexit(SIGERR, "Unknown shuffling type of %d\n", what);
      return NULL;
  }

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = ncols;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Shuffle: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Shuffle: nrowind");
  nrowval = nmat->rowval = (rowval ? gk_fmalloc(rowptr[nrows], "gk_csr_Shuffle: nrowval") : NULL) ;

  /* nrowptr first holds permuted row lengths; MAKECSR turns them into
     offsets, the scatter loop advances them as cursors, and SHIFTCSR
     restores them into a proper ptr array */
  for (i=0; i<nrows; i++)
    nrowptr[rperm[i]] = rowptr[i+1]-rowptr[i];
  MAKECSR(i, nrows, nrowptr);

  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      nrowind[nrowptr[rperm[i]]] = cperm[rowind[j]];
      if (nrowval)
        nrowval[nrowptr[rperm[i]]] = rowval[j];
      nrowptr[rperm[i]]++;
    }
  }
  SHIFTCSR(i, nrows, nrowptr);

  gk_free((void **)&rperm, &cperm, LTERM);

  return nmat;
}


/*************************************************************************/
/*! Computes the similarity between two rows/columns

    \param mat the matrix itself. The routine assumes that the indices
           are sorted in increasing order.
    \param i1 is the first row/column,
    \param i2 is the second row/column,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
           objects between the similarity will be computed,
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* select the row- or column-based slices to compare */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* Merge the two sorted index lists, accumulating the dot product (sim)
     and the per-vector statistics (stat1/stat2: squared norms for
     COS/JAC, 1-norms for MIN/AMIN).
     BUGFIX(review): the loops previously ran `while (i1<nind1 && i2<nind2)`,
     which made the `i1 == nind1` / `i2 == nind2` tail-drain branches
     unreachable and silently dropped any unmatched tail entries from
     stat1/stat2, under-counting the norms. The `||` form lets the
     explicit tail branches run as written. */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else  /* extended Jaccard: dot/(|a|^2+|b|^2-dot) */
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      /* asymmetric MIN: normalized by the first vector's 1-norm only */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}


/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
    similarity.

    \param mat the matrix itself
    \param nqterms is the number of columns in the query
    \param qind is the list of query columns
    \param qval is the list of corresponding query weights
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \param nsim is the maximum number of requested most similar rows.
           If -1 is provided, then everything is returned unsorted.
    \param minsim is the minimum similarity of the requested most
           similar rows
    \param hits is the result set. This array should be at least
           of length nsim.
    \param i_marker is an array of size equal to the number of rows
           whose values are initialized to -1. If NULL is provided then
           this array is allocated and freed internally.
    \param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed internally.

    \returns the number of identified most similar rows, which can be
             smaller than the requested number of nnbrs in those cases
             in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind, float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits, int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  /* an empty query matches nothing */
  if (nqterms == 0)
    return 0;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  /* the column-based (inverted-index) view drives the candidate generation;
     NOTE(review): mat->colptr is assumed non-NULL here (no check as in
     gk_csr_ComputeSimilarity) -- confirm callers create the column view. */
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* marker[row] = position of that row in cand[] while it is an active
     candidate, -1 otherwise; both scratch arrays may be caller-supplied */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand ? i_cand : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      /* accumulate raw dot products of the query against every row that
         shares at least one column.
         NOTE(review): no division by row norms happens for COS -- this
         presumably assumes the rows (and query) have been unit-normalized
         beforehand; verify against the callers. */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {      /* first touch: open a candidate slot */
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      /* same dot-product accumulation as COS ... */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* ... then convert to extended Jaccard: dot/(|row|^2+|q|^2-dot).
         NOTE(review): mat->rnorms must already contain the squared row
         norms -- presumably populated by a precompute helper; confirm. */
      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      /* accumulate sum of element-wise minimums over shared columns */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* generalized Jaccard: min-sum/(rowsum+querysum-min-sum).
         NOTE(review): mat->rsums must be precomputed -- confirm. */
      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity: normalized by the query's sum only */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are below minsim; also reset marker[] so the
     caller-supplied array is back to all -1 for the next query */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;                 /* return everything, unsorted */
  }
  else {
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);  /* partial select of the top nsim */
    gk_fkvsortd(nsim, cand);            /* then sort them by decreasing key */
  }

  gk_fkvcopy(nsim, cand, hits);

  /* only free the scratch arrays we allocated ourselves */
  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}


/*************************************************************************/
/*! This function finds the connected components in a graph.

    \param mat is the graph structure in CSR format
    \param cptr is the ptr structure of the CSR representation of the
           components. The length of this vector must be mat->nrows+1.
    \param cind is the indices structure of the CSR representation of
           the components. The length of this vector must be mat->nrows.
    \param cids is an array that stores the component # of each vertex
           of the graph. The length of this vector must be mat->nrows.
    \returns the number of components that it found.

    \note The cptr, cind, and cids parameters can be NULL, in which case
          only the number of connected components is returned.
*/
/*************************************************************************/
int gk_csr_FindConnectedComponents(gk_csr_t *mat, int32_t *cptr, int32_t *cind, int32_t *cids)
{
  ssize_t i, ii, j, jj, k, nvtxs, first, last, ntodo, ncmps;
  ssize_t *xadj;
  int32_t *adjncy, *pos, *todo;
  /* NOTE(review): mustfree_where is never used below -- dead variable */
  int32_t mustfree_ccsr=0, mustfree_where=0;

  /* the CC computation treats the matrix as a graph adjacency structure,
     so it must be square */
  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_FindComponents: The matrix needs to be square.\n");
    return -1;
  }

  nvtxs  = mat->nrows;
  xadj   = mat->rowptr;
  adjncy = mat->rowind;

  /* Deal with NULL supplied cptr/cind vectors.
     NOTE(review): a caller passing a non-NULL cptr with a NULL cind is not
     handled -- both must be supplied together or both NULL; confirm. */
  if (cptr == NULL) {
    cptr = gk_i32malloc(nvtxs+1, "gk_csr_FindComponents: cptr");
    cind = gk_i32malloc(nvtxs, "gk_csr_FindComponents: cind");
    mustfree_ccsr = 1;
  }

  /* The list of vertices that have not been touched yet.
     The valid entries are from [0..ntodo). */
  todo = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: todo"));

  /* For a vertex that has not been visited, pos[i] is the position in the
     todo list that this vertex is stored.
     If a vertex has been visited, pos[i] = -1. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: pos"));

  /* Find the connected componends */
  ncmps = -1;          /* pre-incremented below, so the first CC gets index 0 */
  ntodo = nvtxs;       /* All vertices have not been visited */
  first = last = 0;    /* Point to the first and last vertices that have been
                          touched but not explored.
                          These vertices are stored in cind[first]...cind[last-1]. */
  /* BFS over the graph, restarting from an arbitrary untouched vertex
     whenever the current frontier empties */
  while (first < last || ntodo > 0) {
    if (first == last) { /* Find another starting vertex */
      cptr[++ncmps] = first;  /* Mark the end of the current CC */

      /* put the first vertex in the todo list as the start of the new CC */
      ASSERT(pos[todo[0]] != -1);
      cind[last++] = todo[0];

      pos[todo[0]] = -1;
      todo[0] = todo[--ntodo];  /* compact the todo list */
      pos[todo[0]] = 0;
    }

    i = cind[first++];  /* Get the first visited but unexplored vertex */

    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      if (pos[k] != -1) {   /* neighbor not yet visited */
        cind[last++] = k;

        /* Remove k from the todo list and put the last item in the todo
           list at the position that k was so that the todo list will be
           consequtive. The pos[] array is updated accordingly to keep track
           the location of the vertices in the todo[] list. */
        todo[pos[k]] = todo[--ntodo];
        pos[todo[pos[k]]] = pos[k];
        pos[k] = -1;
      }
    }
  }
  cptr[++ncmps] = first;  /* close the final component */

  /* see if we need to return cids */
  if (cids != NULL) {
    for (i=0; i<ncmps; i++) {
      for (j=cptr[i]; j<cptr[i+1]; j++)
        cids[cind[j]] = i;
    }
  }

  if (mustfree_ccsr)
    gk_free((void **)&cptr, &cind, LTERM);

  gk_free((void **)&pos, &todo, LTERM);

  return (int) ncmps;
}


/*************************************************************************/
/*! Returns a symmetric version of a square matrix. The symmetric version
    is constructed by applying an A op A^T operation, where op is one of
    GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, GK_CSR_SYM_AVG.

    \param mat the matrix to be symmetrized,
    \param op indicates the operation to be performed. The possible values
           are GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, and
           GK_CSR_SYM_AVG.
    \returns the symmetrized matrix consisting only of its row-based
             structure. The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_MakeSymmetric(gk_csr_t *mat, int op)
{
  ssize_t i, j, k, nnz;
  int nrows, nadj, hasvals;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind, *marker, *ids;
  float *rowval=NULL, *colval=NULL, *nrowval=NULL, *wgts=NULL;
  gk_csr_t *nmat;

  /* A op A^T only makes sense for a square matrix */
  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_MakeSymmetric: The matrix needs to be square.\n");
    return NULL;
  }

  hasvals = (mat->rowval != NULL);

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  if (hasvals)
    rowval = mat->rowval;

  /* create the column view for efficient processing */
  colptr = gk_zsmalloc(nrows+1, 0, "colptr");
  colind = gk_i32malloc(rowptr[nrows], "colind");
  if (hasvals)
    colval = gk_fmalloc(rowptr[nrows], "colval");

  /* histogram of column occurrences ... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      colptr[rowind[j]]++;
  }
  MAKECSR(i, nrows, colptr);   /* ... turned into a prefix-sum ptr array */

  /* scatter the entries into the column view (advances colptr) ... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      colind[colptr[rowind[j]]] = i;
      if (hasvals)
        colval[colptr[rowind[j]]] = rowval[j];
      colptr[rowind[j]]++;
    }
  }
  SHIFTCSR(i, nrows, colptr);  /* ... and restore the shifted ptr array */

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* worst case: every edge of A and A^T is distinct, hence 2*nnz(A) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_MakeSymmetric: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowind");
  if (hasvals)
    nrowval = nmat->rowval = gk_fmalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowval");

  /* marker[v] = position of v in ids[]/wgts[] while merging row i, else -1 */
  marker = gk_ismalloc(nrows, -1, "marker");
  ids    = gk_imalloc(nrows, "ids");
  if (hasvals)
    wgts = gk_fmalloc(nrows, "wgts");

  nrowptr[0] = nnz = 0;
  for (i=0; i<nrows; i++) {
    nadj = 0;
    /* out-edges: seed the merge buffer with row i of A.
       For AVG, out-edges start at half weight; the matching in-edge half
       is added in the merge below. */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      ids[nadj] = rowind[j];
      if (hasvals)
        wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*rowval[j] : rowval[j]);
      marker[rowind[j]] = nadj++;
    }

    /* in-edges: merge column i of A (i.e., row i of A^T) into the buffer */
    for (j=colptr[i]; j<colptr[i+1]; j++) {
      if (marker[colind[j]] == -1) {
        /* edge exists only in A^T; MIN drops one-sided edges entirely */
        if (op != GK_CSR_SYM_MIN) {
          ids[nadj] = colind[j];
          if (hasvals)
            wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*colval[j] : colval[j]);
          nadj++;
        }
      }
      else {
        /* edge exists in both directions: combine the two weights */
        if (hasvals) {
          switch (op) {
            case GK_CSR_SYM_MAX:
              wgts[marker[colind[j]]] = gk_max(colval[j], wgts[marker[colind[j]]]);
              break;
            case GK_CSR_SYM_MIN:
              wgts[marker[colind[j]]] = gk_min(colval[j], wgts[marker[colind[j]]]);
              break;
            case GK_CSR_SYM_SUM:
              wgts[marker[colind[j]]] += colval[j];
              break;
            case GK_CSR_SYM_AVG:
              /* NOTE(review): out-edge half was already halved above, so this
                 averages a halved value with the raw in-edge -- presumably
                 intended as (a+b)/2 overall; verify against upstream GKlib */
              wgts[marker[colind[j]]] = 0.5*(wgts[marker[colind[j]]] + colval[j]);
              break;
            default:
              errexit("Unsupported op for MakeSymmetric!\n");
          }
        }
        marker[colind[j]] = -1;   /* consumed: matched both directions */
      }
    }

    /* go over out edges again to resolve any edges that were not found in the in
     * edges */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      if (marker[rowind[j]] != -1) {
        if (op == GK_CSR_SYM_MIN)
          ids[marker[rowind[j]]] = -1;   /* one-sided edge: drop under MIN */
        marker[rowind[j]] = -1;          /* always reset for the next row */
      }
    }

    /* put the non '-1' entries in ids[] into i's row */
    for (j=0; j<nadj; j++) {
      if (ids[j] != -1) {
        nrowind[nnz] = ids[j];
        if (hasvals)
          nrowval[nnz] = wgts[j];
        nnz++;
      }
    }
    nrowptr[i+1] = nnz;
  }

  gk_free((void **)&colptr, &colind, &colval, &marker, &ids, &wgts, LTERM);

  return nmat;
}
relu6_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"

#include <math.h>

/* ReLU6 clamps every element to the range [0, 6].
 * Both reference kernels assume a 4-D NCHW tensor (dims = {N, C, H, W})
 * -- TODO confirm this holds for all callers. */

/* uint8 reference path: dequantize to a scratch fp32 buffer, clamp,
 * then requantize into the output tensor.
 * Returns 0 on success, -1 if the scratch buffer cannot be allocated. */
int ref_relu6_uint8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2]; /* consistency fix: read H from input, as w/c/n are */
    int channels = input_tensor->dims[1];
    int batch = input_tensor->dims[0];
    int size = h * w;
    int c_step = h * w;
    int batch_step = c_step * channels;
    int total_size = batch_step * batch;

    /* quantization parameters of the two tensors */
    uint8_t* input_uint8 = input_tensor->data;
    uint8_t* output_uint8 = output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;

    /* dequantize */
    float* data_fp32 = sys_malloc(total_size * sizeof(float));
    if (data_fp32 == NULL) /* BUGFIX: allocation result was previously unchecked */
        return -1;

    for (int i = 0; i < total_size; i++)
        data_fp32[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;

    /* clamp each channel plane of each batch to [0, 6] in place */
    for (int n = 0; n < batch; n++)
    {
        //#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* data = data_fp32 + batch_step * n + c_step * q;
            for (int i = 0; i < size; i++)
            {
                if (data[i] > 6)
                    data[i] = 6;
                else if (data[i] < 0)
                    data[i] = 0;
            }
        }
    }

    /* requantize, saturating to the uint8 range */
    for (int i = 0; i < total_size; i++)
    {
        int udata = round(data_fp32[i] / output_scale + output_zero);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }

    sys_free(data_fp32);
    return 0;
}

/* fp32 reference path.
 * BUGFIX: the original iterated only over the channel dimension, so for a
 * batched input (dims[0] > 1) every batch after the first was left
 * unprocessed; it now walks the full N*C*H*W volume, mirroring the uint8
 * path. Returns 0. */
int ref_relu6_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2]; /* consistency fix: read H from input */
    int channels = input_tensor->dims[1];
    int batch = input_tensor->dims[0];
    int size = h * w;
    int c_step = h * w;
    int batch_step = c_step * channels;

    float* input_data = input_tensor->data;
    float* out_data = output_tensor->data;

    for (int n = 0; n < batch; n++)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + batch_step * n + c_step * q;
            float* dst = out_data + batch_step * n + c_step * q;
            for (int i = 0; i < size; i++)
            {
                dst[i] = src[i];
                if (dst[i] > 6)
                    dst[i] = 6;
                if (dst[i] < 0)
                    dst[i] = 0;
            }
        }
    }

    return 0;
}

/* No per-node state is needed for this op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch to the fp32 or uint8 reference kernel based on the input dtype.
 * Returns the kernel's status, or -1 for unsupported dtypes. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_relu6_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_relu6_uint8(input_tensor, output_tensor, exec_graph->num_thread);

    return ret;
}

/* ReLU6 is element-wise: the output shape is the input shape. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* ir_graph = node->graph;
    struct ir_tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct ir_tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);

    int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);

    return ret;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_relu6_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_RELU6, &hcl_node_ops);
}

static int unreg_relu6_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_RELU6, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_relu6_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu6_hcl_ops);
GB_unaryop__ainv_fp32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp32_int64
// op(A') function:  GB_tran__ainv_fp32_int64

// C type:   float
// A type:   int64_t
// cast:     float cij = (float) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse)
#define GB_OP(z, x)   \
    z = -x ;

// casting (int64_t entry widened to float BEFORE negation)
#define GB_CASTING(z, x)   \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */          \
    GB_GETA (aij, Ax, pA) ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;        \
    GB_OP (GB_CX (pC), x) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the anz entries; Cx and Ax must not alias.
GrB_Info GB_unop__ainv_fp32_int64
(
    float *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel body lives in GB_unaryop_transpose.c, which is
// textually included and specialized via the GB_* macros defined above.
GrB_Info GB_tran__ainv_fp32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
VolumetricConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricConvolutionMM.c"
#else

/* Validates tensor shapes for the volumetric (3D) convolution implemented
   via unfold + matrix multiply. gradOutput and bias may be NULL when not
   applicable to the caller.
   NOTE(review): the THError format below uses %d with long arguments --
   presumably benign on LP64 toolchains this targets, but worth confirming. */
static void inline THNN_(VolumetricConvolutionMM_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         THTensor *weight,
                         THTensor *bias,
                         int kT,
                         int kW,
                         int kH,
                         int dT,
                         int dW,
                         int dH,
                         int pT,
                         int pW,
                         int pH) {
  THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
                "4D or 5D (batch mode) tensor expected for input, but got: %s");
  THArgCheck(kT > 0 && kW > 0 && kH > 0, 8,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW);

  int ndim = input->nDimension;
  int dimf = 0;   /* feature (plane) dimension */
  int dimt = 1;   /* temporal/depth dimension */
  int dimh = 2;
  int dimw = 3;

  if (ndim == 5)  /* batch mode: shift all dims right by one */
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  long nInputPlane;
  long inputDepth;
  long inputHeight;
  long inputWidth;
  long nOutputPlane;
  long outputDepth;
  long outputHeight;
  long outputWidth;

  nInputPlane = input->size[dimf];
  inputDepth = input->size[dimt];
  inputHeight  = input->size[dimh];
  inputWidth   = input->size[dimw];
  nOutputPlane = weight->size[0];
  /* standard convolution output-size formula (stride d, padding p) */
  outputDepth  = (inputDepth + 2*pT - kT) / dT + 1;
  outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
  outputWidth  = (inputWidth + 2*pW - kW) / dW + 1;

  if (outputWidth < 1 || outputHeight < 1 || outputDepth < 1)
  {
    THError(
      "Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small",
      nInputPlane, inputDepth, inputHeight, inputWidth,
      nOutputPlane, outputDepth, outputHeight, outputWidth
    );
  }

  THArgCheck(weight->nDimension == 2 || weight->nDimension == 5, 4,
             "weight tensor should be 2D or 5D - got %d", weight->nDimension);

  if (bias != NULL) {
    THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
  }

  THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

/* Returns a contiguous 2-D [nOutputPlane x (nInputPlane*kT*kH*kW)] view of
   a 5-D weight tensor (or the contiguous weight unchanged if already 2-D).
   Always returns a new reference that the caller must free. */
static THTensor* THNN_(view_weight)(THTensor *weight)
{
  weight = THTensor_(newContiguous)(weight);
  if (weight->nDimension == 5) {
    long s1 = weight->size[0];
    long s2 = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4];
    THTensor *old_weight = weight;
    weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset,
                                         s1, -1, s2, -1);
    THTensor_(free)(old_weight);
  }
  return weight;
}

/* Scatter-accumulate the unfolded (im2col-style) buffer back into the input
   layout; the inverse of unfolded_copy_vol, used by the backward pass.
   note: due to write issues, this one cannot be parallelized as well as unfolded_copy */
static void THNN_(unfolded_acc_vol)(
          THTensor *finput,
          THTensor *input,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH,
          long nInputPlane,
          long inputDepth,
          long inputWidth,
          long inputHeight,
          long outputDepth,
          long outputWidth,
          long outputHeight)
{
  long nip;

  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);

//#pragma omp parallel for private(nip)
  for (nip = 0; nip < nInputPlane; nip++)
  {
    long kt, kw, kh, t, y, x, it, ix, iy;
    for (kt = 0; kt < kT; kt++)
    {
      for (kh = 0; kh < kH; kh++)
      {
        for (kw = 0; kw < kW; kw++)
        {
          /* src: one (plane, kt, kh, kw) column of the unfolded buffer */
          real *src = finput_data
            + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
            + kt  * (kH*kW*outputDepth*outputHeight*outputWidth)
            + kh  * (kW*outputDepth*outputHeight*outputWidth)
            + kw  * (outputDepth*outputHeight*outputWidth);
          real *dst = input_data + nip*(inputDepth*inputHeight*inputWidth);
          if (pT > 0 || pH > 0 || pW > 0)
          {
            /* padded case: skip positions that fall outside the input */
            for (t = 0; t < outputDepth; t++)
            {
              it = t*dT - pT + kt;
              for (y = 0; y < outputHeight; y++)
              {
                iy = y*dH - pH + kh;
                for (x = 0; x < outputWidth; x++)
                {
                  ix = x*dW - pW + kw;
                  if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
                  {
                    /* out of bounds: contribution came from padding, drop it */
                  }
                  else
                  {
                    real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
                    THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
                  }
                }
              }
            }
          }
          else
          {
            /* unpadded fast path: every position is in bounds */
            for (t = 0; t < outputDepth; t++)
            {
              it = t*dT + kt;
              for (y = 0; y < outputHeight; y++)
              {
                iy = y*dH + kh;
                for(x = 0; x < outputWidth; x++)
                {
                  ix = x*dW + kw;
                  real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
                  THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
                }
              }
            }
          }
        }
      }
    }
  }
}

/* im2col for volumes: copy each (plane, kt, kh, kw) sliding-window slice of
   input into a row of finput so the convolution becomes a single GEMM.
   Zero-fills positions that fall into the padding region. */
static void THNN_(unfolded_copy_vol)(
          THTensor *finput,
          THTensor *input,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH,
          long nInputPlane,
          long inputDepth,
          long inputWidth,
          long inputHeight,
          long outputDepth,
          long outputWidth,
          long outputHeight)
{
  long k;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);

// #pragma omp parallel for private(k)
  for (k = 0; k < nInputPlane*kT*kH*kW; k++)
  {
    /* decompose flat index k into (plane, kt, kh, kw) */
    long nip = k / (kT*kH*kW);
    long rest = k % (kT*kH*kW);
    long kt = rest / (kH*kW);
    rest = rest % (kH*kW);
    long kh = rest / kW;
    long kw = rest % kW;
    long t,x,y,it,ix,iy;
    real *dst = finput_data
      + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
      + kt  * (kH*kW*outputDepth*outputHeight*outputWidth)
      + kh  * (kW*outputDepth*outputHeight*outputWidth)
      + kw  * (outputDepth*outputHeight*outputWidth);
    real *src = input_data + nip*(inputDepth*inputHeight*inputWidth);

    if (pT > 0 || pH > 0 || pW > 0)
    {
      for (t = 0; t < outputDepth; t++)
      {
        it = t*dT - pT + kt;
        for (y = 0; y < outputHeight; y++)
        {
          iy = y*dH - pH + kh;
          for (x = 0; x < outputWidth; x++)
          {
            ix = x*dW - pW + kw;
            if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
              memset(dst+t*outputHeight*outputWidth+y*outputWidth+x, 0, sizeof(real)*(1));
            else
              memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
          }
        }
      }
    }
    else
    {
      for (t = 0; t < outputDepth; t++)
      {
        it = t*dT + kt;
        for (y = 0; y < outputHeight; y++)
        {
          iy = y*dH + kh;
          for(x = 0; x < outputWidth; x++)
          {
            ix = x*dW + kw;
            memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
          }
        }
      }
    }
  }
}

/* Forward pass for a single (non-batch) sample: unfold the input, seed the
   output with the bias (or zeros), then output2d += weight * finput. */
static void THNN_(VolumetricConvolutionMM_updateOutput_frame)(
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH,
          long nInputPlane,
          long inputDepth,
          long inputWidth,
          long inputHeight,
          long nOutputPlane,
          long outputDepth,
          long outputWidth,
          long outputHeight)
{
  long i;
  THTensor *output2d;

  THNN_(unfolded_copy_vol)(
    finput, input,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    nInputPlane,
    inputDepth, inputWidth, inputHeight,
    outputDepth, outputWidth, outputHeight
  );

  /* flatten output to [nOutputPlane x (D*H*W)] for the GEMM */
  output2d = THTensor_(newWithStorage2d)(
    output->storage, output->storageOffset, nOutputPlane, -1,
    outputDepth*outputHeight*outputWidth, -1
  );

  if (bias) {
    /* broadcast each bias value across its whole output plane */
    for (i = 0; i < nOutputPlane; i++)
    {
      THVector_(fill)(
        output->storage->data+output->storageOffset+output->stride[0]*i,
        THTensor_(get1d)(bias, i),
        outputDepth*outputHeight*outputWidth
      );
    }
  } else {
    THTensor_(zero)(output);
  }

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  THTensor_(free)(output2d);
}

/* Public forward entry point; dispatches per-sample work for batch input.
   finput is the caller-provided unfold scratch buffer (resized here). */
void THNN_(VolumetricConvolutionMM_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH)
{
  int dimf = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  long nInputPlane;
  long inputDepth;
  long inputHeight;
  long inputWidth;
  long nOutputPlane;
  long outputDepth;
  long outputHeight;
  long outputWidth;

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, NULL, weight, bias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH);
  input = THTensor_(newContiguous)(input);

  if (input->nDimension == 5)
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  nInputPlane = input->size[dimf];
  inputDepth = input->size[dimt];
  inputHeight  = input->size[dimh];
  inputWidth   = input->size[dimw];
  nOutputPlane = weight->size[0];
  outputDepth  = (inputDepth + 2*pT - kT) / dT + 1;
  outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
  outputWidth  = (inputWidth + 2*pW - kW) / dW + 1;

  weight = THNN_(view_weight)(weight);

  if (input->nDimension == 4)   /* single sample */
  {
    THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth);

    THNN_(VolumetricConvolutionMM_updateOutput_frame)(
      input, output, weight, bias, finput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH,
      nInputPlane, inputDepth, inputWidth, inputHeight,
      nOutputPlane, outputDepth, outputWidth, outputHeight
    );
  }
  else   /* batch mode: process each sample independently */
  {
    long T = input->size[0];
    long t;

    THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth);

// #pragma omp parallel for private(t)
    for (t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(VolumetricConvolutionMM_updateOutput_frame)(
        input_t, output_t, weight, bias, finput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH,
        nInputPlane, inputDepth, inputWidth, inputHeight,
        nOutputPlane, outputDepth, outputWidth, outputHeight
      );

      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);
  THTensor_(free)(weight);
}

/* Backward-input for one sample: fgradInput = weight^T * gradOutput2d,
   then fold the unfolded gradient back into gradInput. */
static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
          THTensor *gradInput,
          THTensor *gradOutput,
          THTensor *weight,
          THTensor *fgradInput,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH)
{
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    gradOutput->storage, gradOutput->storageOffset,
    gradOutput->size[0], -1,
    gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
  );

  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  THTensor_(free)(gradOutput2d);

  THTensor_(zero)(gradInput);

  THNN_(unfolded_acc_vol)(
    fgradInput, gradInput,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    gradInput->size[0], gradInput->size[1], gradInput->size[3], gradInput->size[2],
    gradOutput->size[1], gradOutput->size[3], gradOutput->size[2]
  );
}

/* Public backward-input entry point; mirrors updateOutput's batching. */
void THNN_(VolumetricConvolutionMM_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *finput,
          THTensor *fgradInput,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH)
{
  /* NOTE(review): nOutputPlane is unused beyond this point */
  int nOutputPlane = (int)weight->size[0];

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, weight, NULL,
        kT, kW, kH, dT, dW, dH, pT, pW, pH);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  weight = THNN_(view_weight)(weight);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);
  // depending on the BLAS library, fgradInput (result tensor) might
  // be left uninitialized on zero alpha, which might lead to weird behavior
  // hence, to be safe, zero it
  THTensor_(zero)(fgradInput);
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 0, 1);

  if (input->nDimension == 4)
  {
    THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
      gradInput, gradOutput, tweight, fgradInput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH
    );
  }
  else
  {
    long T = input->size[0];
    long t;

//#pragma omp parallel for private(t)
    for (t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
        gradInput_t, gradOutput_t, tweight, fgradInput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH
      );

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  THTensor_(free)(tweight);
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

/* Weight/bias gradient for one sample:
   gradWeight += scale * gradOutput2d * finput^T, and gradBias accumulates
   the per-plane sums of gradOutput. */
static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,
          real scale)
{
  long i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    gradOutput->storage, gradOutput->storageOffset,
    gradOutput->size[0], -1,
    gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
  );

  THTensor *tfinput = THTensor_(new)();
  THTensor_(transpose)(tfinput, finput, 0, 1);
  THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
  THTensor_(free)(tfinput);

  if (gradBias) {
    for (i = 0; i < gradBias->size[0]; i++)
    {
      long k;
      real sum = 0;
      real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
      for (k = 0; k < gradOutput2d->size[1]; k++)
        sum += data[k];
      (gradBias->storage->data + gradBias->storageOffset)[i] += scale * sum;
    }
  }

  THTensor_(free)(gradOutput2d);
}

/* Public weight/bias gradient entry point.
   NOTE(review): finput is assumed to still hold the unfolded input from the
   matching updateOutput call -- confirm callers preserve it. */
void THNN_(VolumetricConvolutionMM_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,
          int kT,
          int kW,
          int kH,
          int dT,
          int dW,
          int dH,
          int pT,
          int pW,
          int pH,
          accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  /* NOTE(review): nOutputPlane is unused beyond this point */
  int nOutputPlane = (int)gradWeight->size[0];

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, gradWeight, gradBias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  gradWeight = THNN_(view_weight)(gradWeight);

  if (input->nDimension == 4)   // non-batch mode
  {
    THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
  }
  else  // batch mode
  {
    long T = input->size[0];
    long t;

    for (t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);

      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(gradWeight);
}

#endif
nvptx_asm_delayed_diags.c
// NOTE(review): this is a clang -verify regression test for delayed vs.
// immediate inline-asm diagnostics on the NVPTX OpenMP offload target.
// The expected-error@+N / expected-note@+N directives below are LINE-RELATIVE:
// do not insert or delete any line between a directive and the statement it
// annotates, or the test will fail.
// RUN: %clang_cc1 -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify -DDIAGS -DIMMEDIATE -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// RUN: %clang_cc1 -verify -DDIAGS -DDELAYED -fopenmp -x c -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -fsyntax-only -Wuninitialized
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target

#ifndef DIAGS
// expected-no-diagnostics
#endif // DIAGS

#ifdef IMMEDIATE
#pragma omp declare target
#endif //IMMEDIATE
void t1(int r) {
#ifdef DIAGS
// expected-error@+4 {{invalid input constraint 'mx' in asm}}
#endif // DIAGS
  __asm__("PR3908 %[lf] %[xx] %[li] %[r]"
          : [ r ] "+r"(r)
          : [ lf ] "mx"(0), [ li ] "mr"(0), [ xx ] "x"((double)(0)));
}

unsigned t2(signed char input) {
  unsigned output;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=a' in asm}}
#endif // DIAGS
  __asm__("xyz"
          : "=a"(output)
          : "0"(input));
  return output;
}

double t3(double x) {
  register long double result;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=t' in asm}}
#endif // DIAGS
  __asm __volatile("frndint"
                   : "=t"(result)
                   : "0"(x));
  return result;
}

unsigned char t4(unsigned char a, unsigned char b) {
  unsigned int la = a;
  unsigned int lb = b;
  unsigned int bigres;
  unsigned char res;
#ifdef DIAGS
// expected-error@+3 {{invalid output constraint '=la' in asm}}
#endif // DIAGS
  __asm__("0:\n1:\n"
          : [ bigres ] "=la"(bigres)
          : [ la ] "0"(la), [ lb ] "c"(lb)
          : "edx", "cc");
  res = bigres;
  return res;
}

void t5(void) {
#ifdef DIAGS
// expected-error@+6 {{unknown register name 'st' in asm}}
#endif // DIAGS
  __asm__ __volatile__(
      "finit"
      :
      :
      : "st", "st(1)", "st(2)", "st(3)",
        "st(4)", "st(5)", "st(6)", "st(7)",
        "fpsr", "fpcr");
}

typedef long long __m256i __attribute__((__vector_size__(32)));
void t6(__m256i *p) {
#ifdef DIAGS
// expected-error@+3 {{unknown register name 'ymm0' in asm}}
#endif // DIAGS
  __asm__ volatile("vmovaps %0, %%ymm0" ::"m"(*(__m256i *)p)
                   : "ymm0");
}
#ifdef IMMEDIATE
#pragma omp end declare target
#endif //IMMEDIATE

int main() {
#ifdef DELAYED
#pragma omp target
#endif // DELAYED
  {
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t1(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t2(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t3(0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t4(0, 0);
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t5();
#ifdef DELAYED
// expected-note@+2 {{called by 'main'}}
#endif // DELAYED
    t6(0);
  }
  return 0;
}
data.h
/*!
 * Copyright (c) 2015 by Contributors
 * \file data.h
 * \brief The input data structure of xgboost.
 * \author Tianqi Chen
 */
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_

#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>

#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>

namespace xgboost {
// forward declare dmatrix.
class DMatrix;

/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
  kFloat32 = 1,
  kDouble = 2,
  kUInt32 = 3,
  kUInt64 = 4
};

/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of data fields in MetaInfo */
  static constexpr uint64_t kNumField = 9;
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};  // NOLINT
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};  // NOLINT
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};  // NOLINT
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;  // NOLINT
  /*!
   * \brief the index of begin and end of a group
   *  needed when the learning task is ranking.
   */
  std::vector<bst_group_t> group_ptr_;  // NOLINT
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;  // NOLINT
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin,
   * can be used to specify initial prediction to boost from.
   */
  HostDeviceVector<bst_float> base_margin_;  // NOLINT
  /*!
   * \brief lower bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_lower_bound_;  // NOLINT
  /*!
   * \brief upper bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_upper_bound_;  // NOLINT

  /*! \brief default constructor */
  MetaInfo() = default;
  MetaInfo(MetaInfo&& that) = default;
  MetaInfo& operator=(MetaInfo&& that) = default;
  // Copy assignment is written out by hand so each HostDeviceVector is
  // deep-copied via Resize + Copy rather than shared.
  MetaInfo& operator=(MetaInfo const& that) {
    this->num_row_ = that.num_row_;
    this->num_col_ = that.num_col_;
    this->num_nonzero_ = that.num_nonzero_;

    this->labels_.Resize(that.labels_.Size());
    this->labels_.Copy(that.labels_);

    this->group_ptr_ = that.group_ptr_;

    this->weights_.Resize(that.weights_.Size());
    this->weights_.Copy(that.weights_);

    this->base_margin_.Resize(that.base_margin_.Size());
    this->base_margin_.Copy(that.base_margin_);

    this->labels_lower_bound_.Resize(that.labels_lower_bound_.Size());
    this->labels_lower_bound_.Copy(that.labels_lower_bound_);

    this->labels_upper_bound_.Resize(that.labels_upper_bound_.Size());
    this->labels_upper_bound_.Copy(that.labels_upper_bound_);
    return *this;
  }

  /*!
   * \brief Validate all metainfo.
   */
  void Validate(int32_t device) const;

  MetaInfo Slice(common::Span<int32_t const> ridxs) const;
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight.  Defaults to 1.0f when no weights were supplied.
   */
  inline bst_float GetWeight(size_t i) const {
    return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
  inline const std::vector<size_t>& LabelAbsSort() const {
    // cached result is reused while its size still matches the label count
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
              [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param key The key of the information.
   * \param interface_str String representation of json format array interface.
   *
   *   [ column_0, column_1, ... column_n ]
   *
   * Right now only 1 column is permitted.
   */
  void SetInfo(const char* key, std::string const& interface_str);

 private:
  /*! \brief argsort of labels (mutable: filled lazily by LabelAbsSort) */
  mutable std::vector<size_t> label_order_cache_;
};

/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_feature_t index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief compare entries by feature value (ascending) */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};

/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id;
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Page size for external memory mode. */
  size_t gpu_page_size;

  BatchParam() = default;
  BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
      : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           gpu_page_size != other.gpu_page_size;
  }
};

/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;

  size_t base_rowid{};

  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;

  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }

  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }

  /*! \return Number of instances in the page. */
  inline size_t Size() const {
    return offset.Size() == 0 ? 0 : offset.Size() - 1;
  }

  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }

  /*! \brief clear the page; leaves the single sentinel 0 in offset */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    offset_vec.push_back(0);
    data.HostVector().clear();
  }

  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) {
    base_rowid = row_id;
  }

  SparsePage GetTranspose(int num_columns) const;

  // Sort the entries of each segment by feature value, in parallel over
  // segments.
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }

  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam  AdapterBatchT
   * \param batch
   * \param missing
   * \param nthread
   *
   * \return  The maximum number of columns encountered in this input batch.
   *          Useful when pushing many adapter batches to work out the total
   *          number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);

  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};

class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class EllpackPageImpl;
/*!
 * \brief A page stored in ELLPACK format.
 *
 * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
 * including CUDA-specific implementation details in the header.
 */
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case. An empty ELLPACK page is
   * constructed with its content set later by the reader.
   */
  EllpackPage();

  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);

  /*! \brief Destructor. */
  ~EllpackPage();

  /*! \return Number of instances in the page. */
  size_t Size() const;

  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);

  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }

 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};

template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() = default;
  virtual T& operator*() = 0;
  virtual const T& operator*() const = 0;
  virtual void operator++() = 0;
  virtual bool AtEnd() const = 0;
};

template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;  // NOLINT
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }

  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }

  T& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  // NOTE(review): rhs is deliberately ignored; inequality only means "this
  // iterator has not reached AtEnd()", which is what range-based for needs
  // with the sentinel returned by BatchSet::end().
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }

  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};

template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
  BatchIterator<T> begin() { return begin_iter_; }  // NOLINT
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }  // NOLINT

 private:
  BatchIterator<T> begin_iter_;
};

/*!
 * \brief This is data structure that user can pass to DMatrix::Create
 *  to create a DMatrix for training, user can create this data structure
 *  for customized Data Loading on single machine.
 *
 *   On distributed setting, usually an customized dmlc::Parser is needed instead.
 */
template<typename T>
class DataSource : public dmlc::DataIter<T> {
 public:
  /*!
   * \brief Meta information about the dataset
   * The subclass need to be able to load this correctly from data.
   */
  MetaInfo info;
};

/*!
 * \brief Internal data structured used by XGBoost during training.
 *  There are two ways to create a customized DMatrix that reads in user defined-format.
 *
 *  - Provide a dmlc::Parser and pass into the DMatrix::Create
 *  - Alternatively, if data can be represented by an URL, define a new
 *    dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER;
 *      - This works best for user defined data input source, such as data-base, filesystem.
 *  - Provide a DataSource, that can be passed to DMatrix::Create
 *      This can be used to re-use inmemory data structure into DMatrix.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  template <typename T>
  bool PageExists() const;

  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;

  /*! \brief Whether the matrix is dense. */
  bool IsDense() const {
    return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
  }

  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *   By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       size_t page_size = kPageSize);

  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam  AdapterT  Type of the adapter.
   * \param [in,out]  adapter       View onto an external data.
   * \param           missing       Values to count as missing.
   * \param           nthread       Number of threads for construction.
   * \param           cache_prefix  (Optional) The cache prefix for external memory.
   * \param           page_size     (Optional) Size of the page.
   *
   * \return  a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix = "",
                         size_t page_size = kPageSize);

  virtual DMatrix* Slice(common::Span<int32_t const> ridxs) = 0;
  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;

 protected:
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
  virtual bool EllpackExists() const = 0;
  virtual bool SparsePageExists() const = 0;
};

// Explicit specializations routing GetBatches/PageExists to the protected
// virtual hooks for each page type.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}

template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
  return this->EllpackExists();
}

template<>
inline bool DMatrix::PageExists<SparsePage>() const {
  return this->SparsePageExists();
}

template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}

template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetSortedColumnBatches();
}

template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}
}  // namespace xgboost

namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);

namespace serializer {

template <>
struct Handler<xgboost::Entry> {
  inline static void Write(Stream* strm, const xgboost::Entry& data) {
    strm->Write(data.index);
    strm->Write(data.fvalue);
  }

  inline static bool Read(Stream* strm, xgboost::Entry* data) {
    return strm->Read(&data->index) && strm->Read(&data->fvalue);
  }
};
}  // namespace serializer
}  // namespace dmlc
#endif  // XGBOOST_DATA_H_
omp_task.c
<ompts:test> <ompts:testdescription>Test which checks the omp task directive. The idea of the tests is to generate a set of tasks in a single region. We let pause the tasks generated so that other threads get sheduled to the newly opened tasks.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp task</ompts:directive> <ompts:dependences>omp single</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int <ompts:testcode:functionname>omp_task</ompts:testcode:functionname>(FILE * logFile){ <ompts:orphan:vars> int tids[NUM_TASKS]; int i; </ompts:orphan:vars> #pragma omp parallel { #pragma omp single { for (i = 0; i < NUM_TASKS; i++) { <ompts:orphan> /* First we have to store the value of the loop index in a new variable * which will be private for each task because otherwise it will be overwritten * if the execution of the task takes longer than the time which is needed to * enter the next step of the loop! */ int myi; myi = i; <ompts:check>#pragma omp task</ompts:check> { my_sleep (SLEEPTIME); tids[myi] = omp_get_thread_num(); } /* end of omp task */ </ompts:orphan> } /* end of for */ } /* end of single */ } /*end of parallel */ /* Now we ckeck if more than one thread executed the tasks. */ for (i = 1; i < NUM_TASKS; i++) { if (tids[0] != tids[i]) return 1; } return 0; } /* end of check_parallel_for_private */ </ompts:testcode> </ompts:test>
GB_binop__bclr_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated instantiation of the BCLR (bit-clear) binary
// operator for int8_t; the actual kernels live in the #include'd template
// files, specialized by the macros defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bclr_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__bclr_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__bclr_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__bclr_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bclr_int8)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bclr_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bclr_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bclr_int8)
// C=scalar+B                       GB (_bind1st__bclr_int8)
// C=scalar+B'                      GB (_bind1st_tran__bclr_int8)
// C=A+scalar                       GB (_bind2nd__bclr_int8)
// C=A'+scalar                      GB (_bind2nd_tran__bclr_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int8_t, 8)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITCLR (x, y, int8_t, 8) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_INT8 || GxB_NO_BCLR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bclr_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above returns); generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bclr_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bclr_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bclr_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bclr_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITCLR (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bclr_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITCLR (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int8_t aij = GBX (Ax, pA, false) ;              \
    Cx [pC] = GB_BITCLR (x, aij, int8_t, 8) ;       \
}

GrB_Info GB (_bind1st_tran__bclr_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int8_t aij = GBX (Ax, pA, false) ;              \
    Cx [pC] = GB_BITCLR (aij, y, int8_t, 8) ;       \
}

GrB_Info GB (_bind2nd_tran__bclr_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: es4.h ===== */
#ifndef es4_h
#define es4_h

#include <iostream>
#include <omp.h>
#include <cstdlib>
#include <ctime>   // for time() used to seed rand(); previously pulled in transitively

using namespace std;

// Print the search outcome: elapsed time when the value was found,
// a "not found" message otherwise.
void output(bool find, double time) {
    if (find) {
        cout << endl << "Trovato in: " << time << endl;
        return;
    }
    cout << endl << "Non trovato" << endl;
}

// Fill vec[0..dimension) with pseudo-random values in [1, 1000] using nmt threads.
// NOTE(review): rand() is not guaranteed thread-safe; concurrent calls may
// contend on hidden state. Kept as-is for behavioral compatibility.
void generate(int *vec, int dimension, unsigned nmt) {
    srand(time(NULL));
    #pragma omp parallel for num_threads(nmt)
    for (int i = 0; i < dimension; i++) {
        vec[i] = rand() % 1000 + 1;
    }
}

// Parallel linear search for numberToSearch in vec, using nmt threads and
// OpenMP cancellation to stop early once a match is found; prints the result.
void search(int *vec, int dimension, int numberToSearch, unsigned nmt) {
    bool find = false;
    if (numberToSearch < 0 || numberToSearch > 1000) {
        cout << endl << "Numero non valido!" << endl;
        return;  // bug fix: previously fell through and searched anyway
    }
    double start = omp_get_wtime();
    #pragma omp parallel num_threads(nmt)
    {
        #pragma omp for
        // bug fix: loop index was `unsigned long` compared against the signed
        // `dimension`; use int to match and avoid signed/unsigned surprises
        for (int i = 0; i < dimension; i++) {
            #pragma omp cancellation point for
            if (vec[i] == numberToSearch) {
                #pragma omp atomic write
                find = true;
                #pragma omp cancel for   // effective only if OMP_CANCELLATION is set
            }
        }
    }
    double end = omp_get_wtime();
    output(find, end - start);
}

// Interactive driver: reads thread count, array size, and target value,
// then generates the array and runs the parallel search.
void es4() {
    cout << "Inserisci numero threads" << endl;
    unsigned nmt;
    cin >> nmt;
    cout << endl << "Inserisci dimensione dell'array" << endl;
    int dimension;
    cin >> dimension;
    if (dimension <= 0) {  // robustness: new int[n] with n <= 0 is invalid/UB
        cout << endl << "Dimensione non valida!" << endl;
        return;
    }
    int *vec = new int[dimension];
    cout << endl << "Genero con numeri random tra 1 e 1000..." << endl;
    generate(vec, dimension, nmt);
    cout << endl << "Inserisci numero da cercare" << endl;
    int numberToSearch;
    cin >> numberToSearch;
    search(vec, dimension, numberToSearch, nmt);
    delete [] vec;
}

#endif
/* ===== file: kncmpush3.c ===== */
/* KNC C Library for Skeleton 3D Electrostatic OpenMP/Vector PIC Code */ /* written by Viktor K. Decyk, UCLA and Ricardo Fonseca, ISCTE */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include <math.h> #include <string.h> #include <immintrin.h> #include "kncmpush3.h" /*--------------------------------------------------------------------*/ void ckncgppush3lt(float ppart[], float fxyz[], int kpic[], float qbm, float dt, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space OpenMP/vector version using guard cells data read in tiles particles stored segmented array 94 flops/particle, 30 loads, 6 stores input: all, output: ppart, ek equations used are: vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t),z(t))*dt, vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t),z(t))*dt, vz(t+dt/2) = vz(t-dt/2) + (q/m)*fz(x(t),y(t),z(t))*dt, where q/m is charge/mass, and x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt, z(t+dt) = z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) fy(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fy(n,m,l)+dx*fy(n+1,m,l)) + dy*((1-dx)*fy(n,m+1,l) + dx*fy(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fy(n,m,l+1)+dx*fy(n+1,m,l+1)) + dy*((1-dx)*fy(n,m+1,l+1) + dx*fy(n+1,m+1,l+1))) fz(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fz(n,m,l)+dx*fz(n+1,m,l)) + dy*((1-dx)*fz(n,m+1,l) + dx*fz(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fz(n,m,l+1)+dx*fz(n+1,m,l+1)) + dy*((1-dx)*fz(n,m+1,l+1) + dx*fz(n+1,m+1,l+1))) where n,m,l = leftmost grid points and 
dx = x-n, dy = y-m, dz = z-l ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = velocity vx of particle n in tile m ppart[m][4][n] = velocity vy of particle n in tile m ppart[m][5][n] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape kpic = number of particles per tile qbm = particle charge/mass ratio dt = time interval between successive calculations kinetic energy/mass at time t is also calculated, using ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2+ (vz(t+dt/2)+vz(t-dt/2))**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field array, must be >= nx+1 nyv = third dimension of field array, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, mxv, myv, mxyv, nxyv; float qtm, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, x, y, z, dx, dy, dz; float vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i 
v_nn, v_mm, v_ll, v_it, v_perm; __m512 v_qtm, v_dt, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, p, q, r, s; __m512d v_sum1, v_d; __mmask16 msk; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtm = qbm*dt; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtm = _mm512_set1_ps(qtm); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_dt = _mm512_set1_ps(dt); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,x,y,z,dxp,dyp, \ dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,sum1,v_noff,v_moff,v_loff,v_nn, \ v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1, \ v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_at,v_d,v_sum1,a,b,c,d,e,f,g,p,q,r,s, \ msk,kk,dd,sfxyz) \ 
reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ll = (mz < nz-loff ? mz : nz-loff) + 1; nps = 4*(nn/4); for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = 
_mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); /* dyp = y - (float) mm; */ v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); /* dzp = z - (float) ll; */ v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = 
_mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of acceleration */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); 
/* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of acceleration */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = 
_mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nn,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), 
&sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of acceleration */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = 
_mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of acceleration */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* new velocity */ /* dxp = ppart[j+3*nppmx+npoff]; */ /* dyp = ppart[j+4*nppmx+npoff]; */ /* dzp = ppart[j+5*nppmx+npoff]; */ v_dxp = _mm512_load_ps(&ppart[j+3*nppmx+npoff]); v_dyp = _mm512_load_ps(&ppart[j+4*nppmx+npoff]); v_dzp = _mm512_load_ps(&ppart[j+5*nppmx+npoff]); /* vx = dxp + qtm*dx; */ /* vy = dyp + qtm*dy; */ /* vz = dzp + qtm*dz; */ v_vx = _mm512_fmadd_ps(v_qtm,v_dx,v_dxp); v_vy = _mm512_fmadd_ps(v_qtm,v_dy,v_dyp); v_vz = _mm512_fmadd_ps(v_qtm,v_dz,v_dzp); /* average kinetic energy */ /* dxp += vx; */ /* dyp += vy; */ /* dzp += vz; */ v_dxp = _mm512_add_ps(v_dxp,v_vx); v_dyp = _mm512_add_ps(v_dyp,v_vy); v_dzp = _mm512_add_ps(v_dzp,v_vz); /* sum1 += dxp*dxp + dyp*dyp + dzp*dzp; */ v_at = _mm512_mul_ps(v_dxp,v_dxp); v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dyp,v_dyp)); v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dzp,v_dzp)); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* new position */ /* dx = x + vx*dt; */ /* dy = y + 
vy*dt; */ /* dz = z + vz*dt; */ v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* vz = -vz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); /* if (dz < edgelz) dz += edgerz; */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); v_dz = _mm512_mask_add_ps(v_dz,msk,v_dz,v_edgerz); /* if (dz >= edgerz) dz -= edgerz; */ msk = 
_mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); v_dz = _mm512_mask_sub_ps(v_dz,msk,v_dz,v_edgerz); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new velocity */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find acceleration */ dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; dx = amz*(dx + dyp*sfxyz[nn+4*mxv] + dx1*sfxyz[nn+4*mxv+4]); dy = amz*(dy + dyp*sfxyz[nn+4*mxv+1] + dx1*sfxyz[nn+4*mxv+1+4]); dz = amz*(dz + dyp*sfxyz[nn+4*mxv+2] + dx1*sfxyz[nn+4*mxv+2+4]); mm = nn + 4*mxyv; vx = amx*sfxyz[mm] + amy*sfxyz[mm+4]; vy = amx*sfxyz[mm+1] + amy*sfxyz[mm+1+4]; vz = amx*sfxyz[mm+2] + amy*sfxyz[mm+2+4]; dx = dx + dzp*(vx + dyp*sfxyz[mm+4*mxv] + dx1*sfxyz[mm+4*mxv+4]); dy = dy + dzp*(vy + dyp*sfxyz[mm+4*mxv+1] + dx1*sfxyz[mm+4*mxv+1+4]); dz = dz + dzp*(vz + dyp*sfxyz[mm+4*mxv+2] + dx1*sfxyz[mm+4*mxv+2+4]); /* new velocity */ dxp = ppart[j+3*nppmx+npoff]; dyp = ppart[j+4*nppmx+npoff]; dzp = ppart[j+5*nppmx+npoff]; vx = dxp + qtm*dx; vy = dyp + qtm*dy; vz = dzp + qtm*dz; /* average kinetic energy */ dxp += vx; dyp += vy; dzp += vz; sum1 += dxp*dxp + dyp*dyp+ 
dzp*dzp; /* new position */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; vz = -vz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new velocity */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; } /* sum2 += sum1; */ _mm512_store_pd(&dd[0],v_sum1); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); } /* normalize kinetic energy */ *ek += 0.125f*sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgppushf3lt(float ppart[], float fxyz[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with periodic boundary conditions. 
also determines list of particles which are leaving this tile OpenMP/vector version using guard cells data read in tiles particles stored segmented array 94 flops/particle, 30 loads, 6 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc equations used are: vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t),z(t))*dt, vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t),z(t))*dt, vz(t+dt/2) = vz(t-dt/2) + (q/m)*fz(x(t),y(t),z(t))*dt, where q/m is charge/mass, and x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt, z(t+dt) = z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) fy(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fy(n,m,l)+dx*fy(n+1,m,l)) + dy*((1-dx)*fy(n,m+1,l) + dx*fy(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fy(n,m,l+1)+dx*fy(n+1,m,l+1)) + dy*((1-dx)*fy(n,m+1,l+1) + dx*fy(n+1,m+1,l+1))) fz(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fz(n,m,l)+dx*fz(n+1,m,l)) + dy*((1-dx)*fz(n,m+1,l) + dx*fz(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fz(n,m,l+1)+dx*fz(n+1,m,l+1)) + dy*((1-dx)*fz(n,m+1,l+1) + dx*fz(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = velocity vx of particle n in tile m ppart[m][4][n] = velocity vy of particle n in tile m ppart[m][5][n] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape kpic[l] = number of particles in tile l 
ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio dt = time interval between successive calculations kinetic energy/mass at time t is also calculated, using ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2+ (vz(t+dt/2)+vz(t-dt/2))**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field array, must be >= nx+1 nyv = third dimension of field array, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used optimized version local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, ii, ih, nh, nn, mm, ll, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float qtm, dxp, dyp, dzp, amx, amy, amz, dx1, x, y, z, dx, dy, dz; float vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_it, v_0, v_1, v_3, v_9, v_perm; __m512 v_qtm, v_dt, v_one, v_zero, v_anx, v_any, v_anz; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; 
__m512 a, b, c, d, e, f, g, p, q, r, s; __m512d v_sum1, v_d; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtm = qbm*dt; anx = (float) nx; any = (float) ny; anz = (float) nz; sum2 = 0.0; /* set boundary values */ v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtm = _mm512_set1_ps(qtm); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_dt = _mm512_set1_ps(dt); v_anx = _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,x,y,z, \ dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,vx,vy,vz,edgelx,edgely,edgelz, \ edgerx,edgery,edgerz,sum1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x, \ v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx, \ v_vy,v_vz,v_at,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz, \ v_d,v_sum1,a,b,c,d,e,f,g,p,q,r,s,msk1,msk2,kk,dd,sfxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* load local fields from global array */ nps = 4*(nn/4); for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = 
_mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); /* dyp = y - (float) mm; */ v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); /* dzp = z - (float) ll; */ v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* 
fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = 
_mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of acceleration */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of acceleration */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); 
v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nn,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = 
_mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of acceleration */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), 
&sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = 
_mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of acceleration */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* new velocity */ /* dxp = ppart[j+3*nppmx+npoff]; */ /* dyp = ppart[j+4*nppmx+npoff]; */ /* dzp = ppart[j+5*nppmx+npoff]; */ v_dxp = _mm512_load_ps(&ppart[j+3*nppmx+npoff]); v_dyp = _mm512_load_ps(&ppart[j+4*nppmx+npoff]); v_dzp = _mm512_load_ps(&ppart[j+5*nppmx+npoff]); /* vx = dxp + qtm*dx; */ /* vy = dyp + qtm*dy; */ /* vz = dzp + qtm*dz; */ v_vx = _mm512_fmadd_ps(v_qtm,v_dx,v_dxp); v_vy = _mm512_fmadd_ps(v_qtm,v_dy,v_dyp); v_vz = _mm512_fmadd_ps(v_qtm,v_dz,v_dzp); /* average kinetic energy */ /* dxp += vx; */ /* dyp += vy; */ /* dzp += vz; */ v_dxp = _mm512_add_ps(v_dxp,v_vx); v_dyp = _mm512_add_ps(v_dyp,v_vy); v_dzp = _mm512_add_ps(v_dzp,v_vz); /* sum1 += dxp*dxp + dyp*dyp + dzp*dzp; */ v_at = _mm512_mul_ps(v_dxp,v_dxp); v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dyp,v_dyp)); v_at = _mm512_add_ps(v_at,_mm512_mul_ps(v_dzp,v_dzp)); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = 
_mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* new position */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ /* dz = z + vz*dt; */ v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z); /* find particles going out of bounds */ /* mm = 0; */ v_mm = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* mm = 2; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) v_dx = v_x; } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dx = v_x; } 
} /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* mm += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) v_dy = v_x; } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dy = v_x; } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* mm += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = 
_mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) v_dz = v_x; } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* mm += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* mm += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dz = v_x; } } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new velocity */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(kk,v_mm); for (i = 0; i < 16; i++) { mm = kk[i]; if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = 
ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find acceleration */ dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; dx = amz*(dx + dyp*sfxyz[nn+4*mxv] + dx1*sfxyz[nn+4*mxv+4]); dy = amz*(dy + dyp*sfxyz[nn+4*mxv+1] + dx1*sfxyz[nn+4*mxv+1+4]); dz = amz*(dz + dyp*sfxyz[nn+4*mxv+2] + dx1*sfxyz[nn+4*mxv+2+4]); mm = nn + 4*mxyv; vx = amx*sfxyz[mm] + amy*sfxyz[mm+4]; vy = amx*sfxyz[mm+1] + amy*sfxyz[mm+1+4]; vz = amx*sfxyz[mm+2] + amy*sfxyz[mm+2+4]; dx = dx + dzp*(vx + dyp*sfxyz[mm+4*mxv] + dx1*sfxyz[mm+4*mxv+4]); dy = dy + dzp*(vy + dyp*sfxyz[mm+4*mxv+1] + dx1*sfxyz[mm+4*mxv+1+4]); dz = dz + dzp*(vz + dyp*sfxyz[mm+4*mxv+2] + dx1*sfxyz[mm+4*mxv+2+4]); /* new velocity */ dxp = ppart[j+3*nppmx+npoff]; dyp = ppart[j+4*nppmx+npoff]; dzp = ppart[j+5*nppmx+npoff]; vx = dxp + qtm*dx; vy = dyp + qtm*dy; vz = dzp + qtm*dz; /* average kinetic energy */ dxp += vx; dyp += vy; dzp += vz; sum1 += dxp*dxp + dyp*dyp+ dzp*dzp; /* new position */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 
18;
         }
         else if (dz < edgelz) {
            if (dz < 0.0f) {
               dz += anz;
               if (dz < anz)
                  mm += 9;
               else
                  dz = 0.0f;
            }
            else {
               mm += 9;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
         ppart[j+2*nppmx+npoff] = dz;
/* set new velocity */
         ppart[j+3*nppmx+npoff] = vx;
         ppart[j+4*nppmx+npoff] = vy;
         ppart[j+5*nppmx+npoff] = vz;
/* increment counters: record exiting particle's slot and direction */
         if (mm > 0) {
            ncl[mm+26*l-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*l)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*l)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
/* sum2 += sum1; */
/* reduce the 8 double-precision partial sums held in v_sum1 */
      _mm512_store_pd(&dd[0],v_sum1);
      for (j = 1; j < 8; j++) {
         dd[0] += dd[j];
      }
      sum2 += (sum1 + dd[0]);
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*l] = ih;
   }
/* normalize kinetic energy */
   *ek += 0.125f*sum2;
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void ckncgppost3lt(float ppart[], float q[], int kpic[], float qm,
                   int nppmx, int idimp, int mx, int my, int mz,
                   int nxv, int nyv, int nzv, int mx1, int my1,
                   int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   33 flops/particle, 11 loads, 8 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
   q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
   q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
   q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
   q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
   q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
   q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
   q(n+1,m+1,l+1)=qm*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   q[l][k][j] = charge density at grid point j,k,l
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 6
   mx/my/mz = number of grids in sorting cell in x/y/z
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   nzv = third dimension of charge array, must be >= nz+1
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mxyz1 = mx1*my1*mz1,
   where mz1 = (system length in z direction - 1)/mz + 1
   requires KNC, ppart needs to be 64 byte aligned
   nppmx needs to be a multiple of 16
   NOTE(review): the loadunpack/packstore intrinsics used below appear
   to be KNC (IMCI) only, not AVX-512 -- confirm target before porting
local data                                                            */
#define MXV             17
#define MYV             17
#define MZV             17
   int mxy1, noff, moff, loff, npoff, npp, nps;
   int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
   float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
   __m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
   __m512i v_nn, v_mm, v_ll, v_it;
   __m512 v_qm, v_one;
   __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
   __m512 v_dx1, v_as, v_at;
   __m512 a, b, c, d, e, f, g, h, qp, qr;
   __mmask16 msk, msks, v_m;
   __attribute__((aligned(64))) unsigned int kk[16];
   __attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
   mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
   mxv = mx + 1;
   myv = my + 1;
   mxyv = mxv*myv;
   nxyv = nxv*nyv;
   v_mxv = _mm512_set1_epi32(mxv);
   v_mxyv = _mm512_set1_epi32(mxyv);
   v_qm = _mm512_set1_ps(qm);
   v_one = _mm512_set1_ps(1.0f);
/* build v_m = mask with lane 0 clear: only element 0 of v_at is 1.0, */
/* so only that lane fails the < 1.0 test; used below to skip the     */
/* i = 0 guard element in the interior deposit                        */
   v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
                        1.);
   v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/*    return;                                     */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,a,b,c, \
d,e,f,g,h,qp,qr,msk,msks,kk,sq)
   for (l = 0; l < mxyz1; l++) {
/* decode tile index l into grid offsets (noff,moff,loff) */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      v_noff = _mm512_set1_epi32(noff);
      v_moff = _mm512_set1_epi32(moff);
      v_loff = _mm512_set1_epi32(loff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/*    sq[j] = 0.0f;                    */
/* }                                   */
      memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff];         */
/* y = ppart[j+nppmx+npoff];   */
/* z = ppart[j+2*nppmx+npoff]; */
         v_x = _mm512_load_ps(&ppart[j+npoff]);
         v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
         v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
         v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
         v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
         v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
         v_nn = _mm512_sub_epi32(v_nn,v_noff);
         v_mm = _mm512_sub_epi32(v_mm,v_moff);
         v_ll = _mm512_sub_epi32(v_ll,v_loff);
         v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
         v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
         v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp;   */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
         v_amx = _mm512_sub_ps(v_qm,v_dxp);
         v_amy = _mm512_sub_ps(v_one,v_dyp);
         v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
         v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
         v_dyp = _mm512_mul_ps(v_amx,v_dyp);
         v_amx = _mm512_mul_ps(v_amx,v_amy);
         v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* the 8 trilinear weights for all 16 particles: */
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
         a = _mm512_mul_ps(v_amx,v_amz);
         b = _mm512_mul_ps(v_amy,v_amz);
         c = _mm512_mul_ps(v_dyp,v_amz);
         d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
         e = _mm512_mul_ps(v_amx,v_dzp);
         f = _mm512_mul_ps(v_amy,v_dzp);
         g = _mm512_mul_ps(v_dyp,v_dzp);
         h = _mm512_mul_ps(v_dx1,v_dzp);
         _mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz;       */
/* y = sq[nn+1] + amy*amz;     */
/* z = sq[nn+mxv] + dyp*amz;   */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x;                 */
/* sq[nn+1] = y;               */
/* sq[nn+mxv] = z;             */
/* sq[nn+1+mxv] = w;           */
/* mm = nn + mxyv;             */
/* x = sq[mm] + amx*dzp;       */
/* y = sq[mm+1] + amy*dzp;     */
/* z = sq[mm+mxv] + dyp*dzp;   */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x;                 */
/* sq[mm+1] = y;               */
/* sq[mm+mxv] = z;             */
/* sq[mm+1+mxv] = w;           */
/* deposit charge for two particles at a time:                        */
/* msk = 3<<(2*i) selects the 2-lane slot for particle pair i; msks   */
/* picks one lane within it.  The masked shuffle (pattern 177 = 0xB1, */
/* which swaps adjacent 32-bit lanes) merges the partner weight into  */
/* the neighboring lane, so each masked packstore updates sq[mm] and  */
/* sq[mm+1] together -- NOTE(review): lane layout inferred from the   */
/* mask constants, confirm against the KNC intrinsics reference       */
         for (i = 0; i < 8; i++) {
/* first particle */
            mm = kk[2*i];
            msk = _mm512_int2mask(3<<(2*i));
            msks = _mm512_int2mask(2<<(2*i));
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)a,msks,
                   (__m512i)b,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)c,msks,
                   (__m512i)d,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
            mm = mm + mxyv;
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)e,msks,
                   (__m512i)f,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)g,msks,
                   (__m512i)h,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
/* second particle: shuffle operand order swapped, msks = low lane */
            mm = kk[2*i+1];
            msks = _mm512_int2mask(1<<(2*i));
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)b,msks,
                   (__m512i)a,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)d,msks,
                   (__m512i)c,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
            mm = mm + mxyv;
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)f,msks,
                   (__m512i)e,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)h,msks,
                   (__m512i)g,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
         }
      }
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         z = ppart[j+2*nppmx+npoff];
         nn = x;
         mm = y;
         ll = z;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         dzp = z - (float) ll;
         nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
         amz = 1.0f - dzp;
         dx1 = dxp*dyp;
         dyp = amx*dyp;
         amx = amx*amy;
         amy = dxp*amy;
/* deposit charge */
         x = sq[nn] + amx*amz;
         y = sq[nn+1] + amy*amz;
         z = sq[nn+mxv] + dyp*amz;
         w = sq[nn+1+mxv] + dx1*amz;
         sq[nn] = x;
         sq[nn+1] = y;
         sq[nn+mxv] = z;
         sq[nn+1+mxv] = w;
         mm = nn + mxyv;
         x = sq[mm] + amx*dzp;
         y = sq[mm+1] + amy*dzp;
         z = sq[mm+mxv] + dyp*dzp;
         w = sq[mm+1+mxv] + dx1*dzp;
         sq[mm] = x;
         sq[mm+1] = y;
         sq[mm+mxv] = z;
         sq[mm+1+mxv] = w;
      }
/* deposit charge to interior points in global array */
      nn = nxv - noff;
      nn = mx < nn ? mx : nn;
      mm = nyv - moff;
      mm = my < mm ? my : mm;
      ll = nzv - loff;
      ll = mz < ll ? mz : ll;
      nps = 16*(nn/16);
      for (k = 1; k < ll; k++) {
         for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 16 */
/* for (i = 1; i < nn; i++) {               */
/*    q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]  */
/*    += sq[i+mxv*j+mxyv*k];                */
/* }                                        */
            for (i = 0; i < nps; i+=16) {
               m = i + mxv*j + mxyv*k;
               v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
               v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
               m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
               v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
               v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 (edge point handled below) */
               if (i==0)
                  v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
               else
                  v_at = _mm512_add_ps(v_at,v_as);
               _mm512_packstorelo_ps(&q[m],v_at);
               _mm512_packstorehi_ps(&q[m+16],v_at);
            }
/* loop over remaining elements */
            m = 1 > nps ? 1 : nps;
            for (i = m; i < nn; i++) {
               q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
               += sq[i+mxv*j+mxyv*k];
            }
         }
      }
/* deposit charge to edge points in global array; atomics are needed */
/* because neighboring tiles update the same guard cells             */
      lm = nzv - loff;
      lm = mz+1 < lm ? mz+1 : lm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
            if (lm > mz) {
#pragma omp atomic
               q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[i+mxv*j+mxyv*(lm-1)];
            }
         }
      }
      nm = nxv - noff;
      nm = mx+1 < nm ? mx+1 : nm;
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (k = 0; k < ll; k++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
               += sq[i+mxv*(mm-1)+mxyv*k];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
               += sq[nm-1+mxv*j+mxyv*k];
            }
         }
      }
      if (lm > mz) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
               += sq[i+mxv*(mm-1)+mxyv*(lm-1)];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
            += sq[mxv*j+mxyv*(lm-1)];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[nm-1+mxv*j+mxyv*(lm-1)];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void cknc2gppost3lt(float ppart[], float q[], int kpic[], float qm,
                    int nppmx, int idimp, int mx, int my, int mz,
                    int nxv, int nyv, int nzv, int mx1, int my1,
                    int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   33 flops/particle, 11 loads, 8 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
   q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
   q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
   q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz q(n+1,m,l+1)=qm*dx*(1.-dy)*dz q(n,m+1,l+1)=qm*(1.-dx)*dy*dz q(n+1,m+1,l+1)=qm*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m q[l][k][j] = charge density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e nppmx = maximum number of particles in tile idimp = size of phase space = 6 mx/my/mz = number of grids in sorting cell in x/y/z nxv = first dimension of charge array, must be >= nx+1 nyv = second dimension of charge array, must be >= ny+1 nzv = third dimension of charge array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz; __m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv; __m512i v_nn, v_mm, v_ll, v_it; __m512 v_qm, v_one; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_as, v_at; __mmask16 v_m; __attribute__((aligned(64))) unsigned int kk[16]; typedef union vfloat {float v[16]; __m512 v16;} vf; __attribute__((aligned(64))) float sq[MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */ vf vv[8]; mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx + 1; myv = my + 1; mxyv = mxv*myv; nxyv = nxv*nyv; v_mxv = _mm512_set1_epi32(mxv); v_mxyv = _mm512_set1_epi32(mxyv); v_qm = _mm512_set1_ps(qm); v_one = _mm512_set1_ps(1.0f); v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., 1.); v_m = 
_mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \ dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \ v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,kk,sq,vv) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* zero out local accumulator */ /* for (j = 0; j < mxyv*(mz+1); j++) { */ /* sq[j] = 0.0f; */ /* } */ memset((void*)sq,0,mxyv*(mz+1)*sizeof(float)); nps = 16*(npp/16); /* vector loop over particles in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); /* dyp = y - (float) mm; */ v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); /* dzp = z - (float) ll; */ v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */ v_nn = 
_mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm)); v_nn = _mm512_add_epi32(v_nn,v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* x = amx*amz; */ /* y = amy*amz; */ /* z = dyp*amz; */ /* w = dx1*amz; */ vv[0].v16 = _mm512_mul_ps(v_amx,v_amz); vv[1].v16 = _mm512_mul_ps(v_amy,v_amz); vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz); vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz); vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp); vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp); vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp); vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp); _mm512_store_epi32(kk,v_nn); /* deposit charge */ /* x = sq[nn] + amx*amz; */ /* y = sq[nn+1] + amy*amz; */ /* z = sq[nn+mxv] + dyp*amz; */ /* w = sq[nn+1+mxv] + dx1*amz; */ /* sq[nn] = x; */ /* sq[nn+1] = y; */ /* sq[nn+mxv] = z; */ /* sq[nn+1+mxv] = w; */ /* mm = nn + mxyv; */ /* x = sq[mm] + amx*dzp; */ /* y = sq[mm+1] + amy*dzp; */ /* z = sq[mm+mxv] + dyp*dzp; */ /* w = sq[mm+1+mxv] + dx1*dzp; */ /* sq[mm] = x; */ /* sq[mm+1] = y; */ /* sq[mm+mxv] = z; */ /* sq[mm+1+mxv] = w; */ for (i = 0; i < 16; i++) { nn = kk[i]; x = sq[nn] + vv[0].v[i]; y = sq[nn+1] + vv[1].v[i]; z = sq[nn+mxv] + vv[2].v[i]; w = sq[nn+1+mxv] + vv[3].v[i]; sq[nn] = x; sq[nn+1] = y; sq[nn+mxv] = z; sq[nn+1+mxv] = w; mm = nn + mxyv; x = sq[mm] + vv[4].v[i]; y = sq[mm+1] + vv[5].v[i]; z = sq[mm+mxv] + vv[6].v[i]; w = sq[mm+1+mxv] + vv[7].v[i]; sq[mm] = x; sq[mm+1] = y; sq[mm+mxv] = z; sq[mm+1+mxv] = w; } } /* loop over remaining particles */ for (j = nps; j < npp; 
j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); amx = qm - dxp; amy = 1.0f - dyp; amz = 1.0f - dzp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amy = dxp*amy; /* deposit charge */ x = sq[nn] + amx*amz; y = sq[nn+1] + amy*amz; z = sq[nn+mxv] + dyp*amz; w = sq[nn+1+mxv] + dx1*amz; sq[nn] = x; sq[nn+1] = y; sq[nn+mxv] = z; sq[nn+1+mxv] = w; mm = nn + mxyv; x = sq[mm] + amx*dzp; y = sq[mm+1] + amy*dzp; z = sq[mm+mxv] + dyp*dzp; w = sq[mm+1+mxv] + dx1*dzp; sq[mm] = x; sq[mm+1] = y; sq[mm+mxv] = z; sq[mm+1+mxv] = w; } /* deposit charge to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; nps = 16*(nn/16); for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */ /* += sq[i+mxv*j+mxyv*k]; */ /* } */ for (i = 0; i < nps; i+=16) { m = i + mxv*j + mxyv*k; v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]); v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]); m = i + noff + nxv*(j + moff) + nxyv*(k + loff); v_at = _mm512_loadunpacklo_ps(v_at,&q[m]); v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]); /* skip add for first element for i = 0 */ if (i==0) v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as); else v_at = _mm512_add_ps(v_at,v_as); _mm512_packstorelo_ps(&q[m],v_at); _mm512_packstorehi_ps(&q[m+16],v_at); } /* loop over remaining elements */ m = 1 > nps ? 1 : nps; for (i = m ; i < nn; i++) { q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[i+mxv*j+mxyv*k]; } } } /* deposit charge to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? 
            mz+1 : lm;
/* deposit charge to edge points in global array                      */
/* edge cells are shared between tiles, so updates must be atomic    */
/* z = loff face (and, if lm > mz, the far z face) */
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
            if (lm > mz) {
#pragma omp atomic
               q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[i+mxv*j+mxyv*(lm-1)];
            }
         }
      }
      nm = nxv - noff;
      nm = mx+1 < nm ? mx+1 : nm;
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
/* y = moff face and x = noff face of each z slice */
      for (k = 0; k < ll; k++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
               += sq[i+mxv*(mm-1)+mxyv*k];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
               += sq[nm-1+mxv*j+mxyv*k];
            }
         }
      }
/* corners/edges on the far z face */
      if (lm > mz) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
               += sq[i+mxv*(mm-1)+mxyv*(lm-1)];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[mxv*j+mxyv*(lm-1)];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[nm-1+mxv*j+mxyv*(lm-1)];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void ckncpporder3lt(float ppart[], float ppbuff[], int kpic[],
                    int ncl[], int ihole[], int idimp, int nppmx,
                    int nx, int ny, int nz, int mx, int my, int mz,
                    int mx1, int my1, int mz1, int npbmx, int ntmax,
                    int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   algorithm has 3 steps. first, one finds particles leaving tile and
   stores their number in each direction, location, and destination in
   ncl and ihole. second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order.
   finally, we copy the incoming particles from other tiles into ppart.
   input: all except ppbuff, ncl, ihole, irc
   output: ppart, ppbuff, kpic, ncl, ihole, irc
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppbuff[m][i][n] = i co-ordinate of particle n in tile m
   kpic[m] = number of particles in tile m
   ncl[m][i] = number of particles going to destination i, tile m
   ihole[m][:][0] = location of hole in array left by departing particle
   ihole[m][:][1] = direction destination of particle leaving hole
   all for tile m
   ihole[m][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 6
   nppmx = maximum number of particles in tile
   nx/ny/nz = system length in x/y/z direction
   mx/my/mz = number of grids in sorting cell in x/y/z
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mz1 = (system length in z direction - 1)/mz + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart, ppbuff need to be 64 byte aligned
   nppmx, npbmx need to be a multiple of 16
   NOTE(review): the loadunpack/packstore and permutevar intrinsics used
   below are KNC (IMCI) only, not AVX-512; this code will not build for
   AVX-512 targets without translation.
local data */
   int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff;
   int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
   int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
   float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
   float dx, dy, dz;
/* ks = tile indices of the 26 neighbors (3x3x3 neighborhood, center
   excluded), in the same direction order used by ncl/ihole */
   int ks[26];
   __m512i v_ist, v_it, v_0, v_1, v_3, v_9;
   __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
   __m512 v_anx, v_any, v_anz;
   __m512 v_dx, v_dy, v_dz, v_x;
   __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
   __m512 v_zero;
   __mmask16 msk1, msk2;
   __attribute__((aligned(64))) unsigned int ls[32], lm[32];
   mxy1 = mx1*my1;
   mxyz1 = mxy1*mz1;
   anx = (float) nx;
   any = (float) ny;
   anz = (float) nz;
   v_0 = _mm512_set1_epi32(0);
   v_1 = _mm512_set1_epi32(1);
   v_3 = _mm512_set1_epi32(3);
   v_9 = _mm512_set1_epi32(9);
   v_anx = _mm512_set1_ps(anx);
   v_any = _mm512_set1_ps(any);
   v_anz = _mm512_set1_ps(anz);
   v_zero = _mm512_setzero_ps();
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \
dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \
v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \
msk2,ls)
   for (l = 0; l < mxyz1; l++) {
/* decode tile index l into grid offsets noff/moff/loff */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      ll = nz - loff;
      ll = mz < ll ? mz : ll;
      ih = 0;
      nh = 0;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      edgelz = loff;
      edgerz = loff + ll;
/* noff is reused below as the ihole offset for tile l */
      noff = (ntmax+1)*l;
      v_edgelx = _mm512_set1_ps(edgelx);
      v_edgely = _mm512_set1_ps(edgely);
      v_edgelz = _mm512_set1_ps(edgelz);
      v_edgerx = _mm512_set1_ps(edgerx);
      v_edgery = _mm512_set1_ps(edgery);
      v_edgerz = _mm512_set1_ps(edgerz);
/* clear counters */
/*    for (j = 0; j < 26; j++) { */
/*       ncl[j+26*l] = 0;        */
/*    }                          */
      memset((void*)&ncl[26*l],0,26*sizeof(int));
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/*       dx = ppart[j+npoff];         */
/*       dy = ppart[j+nppmx+npoff];   */
/*       dz = ppart[j+2*nppmx+npoff]; */
         v_dx = _mm512_load_ps(&ppart[j+npoff]);
         v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* find particles going out of bounds */
/*       ist = 0; */
         v_ist = _mm512_setzero_epi32();
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going                             */
/*       if (dx >= edgerx) {              */
/*          if (dx >= anx)                */
/*             ppart[j+npoff] = dx - anx; */
/*          ist = 2;                      */
/*       }                                */
         msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dx;
/* write output if test result is true for any particle */
            if (ii != 0) {
               v_it = _mm512_add_epi32(v_1,v_1);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+npoff],v_x);
            }
/*          if (dx < edgelx) {         */
/*             if (dx < 0.0) {         */
/*                dx += anx;           */
/*                if (dx < anx)        */
/*                   ist = 1;          */
/*                else                 */
/*                   dx = 0.0;         */
/*                ppart[j+npoff] = dx; */
/*             }                       */
/*             else {                  */
/*                ist = 1;             */
/*             }                       */
/*          }                          */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1);
               msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx);
               msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+npoff],v_x);
            }
         }
/*       if (dy >= edgery) {                    */
/*          if (dy >= any)                      */
/*             ppart[j+nppmx+npoff] = dy - any; */
/*          ist += 6;                           */
/*       }                                      */
         msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dy;
/* write output if test result is true for any particle */
            if (ii != 0) {
               v_it = _mm512_add_epi32(v_3,v_3);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
            }
/*          if (dy < edgely) {               */
/*             if (dy < 0.0) {               */
/*                dy += any;                 */
/*                if (dy < any)              */
/*                   ist += 3;               */
/*                else                       */
/*                   dy = 0.0;               */
/*                ppart[j+nppmx+npoff] = dy; */
/*             }                             */
/*             else {                        */
/*                ist += 3;                  */
/*             }                             */
/*          }                                */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3);
               msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any);
               msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+nppmx+npoff],v_x);
            }
         }
/*       if (dz >= edgerz) {                      */
/*          if (dz >= anz)                        */
/*             ppart[j+2*nppmx+npoff] = dz - anz; */
/*          ist += 18;                            */
/*       }                                        */
         msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE);
         msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
         ii = _mm512_mask2int(_mm512_kor(msk1,msk2));
/* execute if either test result is true for any particle */
         if (ii != 0) {
            ii = _mm512_mask2int(msk1);
            v_x = v_dz;
/* write output if test result is true for any particle */
            if (ii != 0) {
               v_it = _mm512_add_epi32(v_9,v_9);
               v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it);
               msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE);
               v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz);
               ii = _mm512_mask2int(msk1);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
            }
/*          if (dz < edgelz) {                 */
/*             if (dz < 0.0) {                 */
/*                dz += anz;                   */
/*                if (dz < anz)                */
/*                   ist += 9;                 */
/*                else                         */
/*                   dz = 0.0;                 */
/*                ppart[j+2*nppmx+npoff] = dz; */
/*             }                               */
/*             else {                          */
/*                ist += 9;                    */
/*             }                               */
/*          }                                  */
/* write output if test result is true for any particle */
            ii = _mm512_mask2int(msk2);
            if (ii != 0) {
               v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9);
               msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT);
               v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz);
               msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE);
               msk1 = _mm512_kand(msk1,msk2);
               v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero);
               v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0);
               v_ist = _mm512_add_epi32(v_ist,v_it);
               ii = _mm512_mask2int(msk2);
               if (ii != 0)
                  _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x);
            }
         }
/* increment counters */
/*       if (ist > 0) {                           */
/*          ncl[ist+26*l-1] += 1;                 */
/*          ih += 1;                              */
/*          if (ih <= ntmax) {                    */
/*             ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */
/*             ihole[1+2*(ih+(ntmax+1)*l)] = ist; */
/*          }                                     */
/*          else {                                */
/*             nh = 1;                            */
/*          }                                     */
/*       }                                        */
         _mm512_store_epi32(ls,v_ist);
         for (i = 0; i < 16; i++) {
            ist = ls[i];
            if (ist > 0) {
               ncl[ist+26*l-1] += 1;
               ih += 1;
               if (ih <= ntmax) {
                  ihole[2*(ih+noff)] = j + i + 1;
                  ihole[1+2*(ih+noff)] = ist;
               }
               else {
                  nh = 1;
               }
            }
         }
      }
/* loop over remaining particles in tile */
      for (j = nps; j < npp; j++) {
         dx = ppart[j+npoff];
         dy = ppart[j+nppmx+npoff];
         dz = ppart[j+2*nppmx+npoff];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going                             */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[j+npoff] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[j+npoff] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[j+nppmx+npoff] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[j+nppmx+npoff] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (dz >= edgerz) {
            if (dz >= anz)
               ppart[j+2*nppmx+npoff] = dz - anz;
            ist += 18;
         }
         else if (dz < edgelz) {
            if (dz < 0.0) {
               dz += anz;
               if (dz < anz)
                  ist += 9;
               else
                  dz = 0.0;
               ppart[j+2*nppmx+npoff] = dz;
            }
            else {
               ist += 9;
            }
         }
         if (ist > 0) {
            ncl[ist+26*l-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+noff)] = j + 1;
               ihole[1+2*(ih+noff)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*noff] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
   msk1 = _mm512_int2mask(1023);
   v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
   v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
   for (l = 0; l < mxyz1; l++) {
      npoff = idimp*nppmx*l;
      nboff = idimp*npbmx*l;
      noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/*    isum = 0;                   */
/*    for (j = 0; j < 26; j++) {  */
/*       ist = ncl[j+26*l];       */
/*       ncl[j+26*l] = isum;      */
/*       isum += ist;             */
/*    }                           */
/* perform exclusive prefix scan                                 */
/* load 26 data elements into 32 length vector with zero padding */
      mm = 26*l;
      v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
      v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
      _mm512_store_epi32(ls,v_it);
      v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
      v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
      _mm512_store_epi32(&ls[16],v_is);
      v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
/* log-step scan: each pass doubles the reach of the partial sums */
      for (j = 0; j < 32; j+=16) {
/* load data */
         v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
         v_is = _mm512_shuffle_epi32(v_it0,177);
         v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
                v_it0,v_is);
/* second pass */
         v_is = _mm512_shuffle_epi32(v_it,80);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
                v_is);
/* third pass */
         v_is = _mm512_permutevar_epi32(v_m1,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
                v_is);
/* fourth pass */
         v_is = _mm512_permutevar_epi32(v_m2,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
                v_is);
/* add offset */
         v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset */
         if (j==0) {
            v_ioff = _mm512_shuffle_epi32(v_it,255);
            v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
         }
/* subtract for exclusive scan */
         v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
         _mm512_store_epi32(&ls[j],v_it);
      }
      nh = ihole[2*noff];
      nps = 16*(nh/16);
/*    nps = (nh >> 4) << 4; */
      ip = 0;
/* loop over particles leaving tile in groups of 16 */
      for (j = 0; j < nps; j+=16) {
/*       j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1;  */
/*       ist = ihole[1+2*(j+1+(ntmax+1)*l)];   */
         mm = 2*(j+1+noff);
         v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
         _mm512_store_epi32(lm,v_it);
         mm += 16;
         v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
         _mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
         for (ll = 0; ll < 16; ll++) {
            j1 = lm[2*ll] - 1;
            ist = lm[1+2*ll];
            ii = ls[ist-1];
            if (ii < npbmx) {
               for (i = 0; i < idimp; i++) {
                  ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff];
               }
            }
            else {
               ip = 1;
            }
            ls[ist-1] = ii + 1;
         }
      }
/* loop over remaining particles leaving tile */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+noff)] - 1;
         ist = ihole[1+2*(j+1+noff)];
         ii = ls[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ls[ist-1] = ii + 1;
      }
/* store 26 data elements into ncl */
      mm = 26*l;
      v_it = _mm512_load_epi32(ls);
      v_is = _mm512_load_epi32(&ls[16]);
      _mm512_packstorelo_epi32(&ncl[mm],v_it);
      _mm512_packstorehi_epi32(&ncl[mm+16],v_it);
      _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
      _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
      if (ip > 0)
         *irc = ncl[25+26*l];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
   v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
   v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
   for (l = 0; l < mxyz1; l++) {
      npp = kpic[l];
      npoff = idimp*nppmx*l;
      noff = (ntmax+1)*l;
      v_m2 = _mm512_set1_epi32(noff+1);
      v_m3 = _mm512_set1_epi32(npoff);
      kz = l/mxy1;
      k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
      lk = kz*mxy1;
/* find tile behind */
      ll = kz - 1;
      if (ll < 0)
         ll += mz1;
      ll = ll*mxy1;
/* find tile in front */
      lr = kz + 1;
      if (lr >= mz1)
         lr -= mz1;
      lr = lr*mxy1;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1 ;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk + lk;
      ks[1] = kxl + kk + lk;
      ks[2] = kx + kr + lk;
      ks[3] = kxr + kr + lk;
      ks[4] = kxl + kr + lk;
      ks[5] = kx + kl + lk;
      ks[6] = kxr + kl + lk;
      ks[7] = kxl + kl + lk;
      ks[8] = kx + kk + lr;
      ks[9] = kxr + kk + lr;
      ks[10] = kxl + kk + lr;
      ks[11] = kx + kr + lr;
      ks[12] = kxr + kr + lr;
      ks[13] = kxl + kr + lr;
      ks[14] = kx + kl + lr;
      ks[15] = kxr + kl + lr;
      ks[16] = kxl + kl + lr;
      ks[17] = kx + kk + ll;
      ks[18] = kxr + kk + ll;
      ks[19] = kxl + kk + ll;
      ks[20] = kx + kr + ll;
      ks[21] = kxr + kr + ll;
      ks[22] = kxl + kr + ll;
      ks[23] = kx + kl + ll;
      ks[24] = kxr + kl + ll;
      ks[25] = kxl + kl + ll;
/* loop over directions */
      nh = ihole[2*noff];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      v_it0 = _mm512_set1_epi32(nh);
      v_is = _mm512_add_epi32(v_m2,v_it0);
      v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
      v_npp = _mm512_set1_epi32(npp);
      for (ii = 0; ii < 26; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+26*ks[ii]] - ncoff;
/*       nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/*          ih += 1;                           */
/*          if (ih <= nh) {                    */
/*             j1 = ihole[2*(ih+noff)] - 1;    */
/*          }                                  */
/* place overflow at end of array */
/*          else {                             */
/*             j1 = npp;                       */
/*             npp += 1;                       */
/*          }                                  */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
            msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
            v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
                   v_npp);
            v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
                   (int *)ihole,4);
            v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
            ih += 16;
            nn = ih - nh;
            if (nn > 0) {
               nn = nn < 16 ? nn : 16;
               npp += nn;
            }
            msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
            ll = _mm512_mask2int(_mm512_knot(msk1));
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/*             if (j1 < nppmx)                         */
/*                ppart[j1+nppmx*i+npoff]              */
/*                = ppbuff[j+ncoff+npbmx*i+nboff];     */
               mm = j + ncoff + npbmx*i + nboff;
               v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
               v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
               if (ll==0) {
                  _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
               }
               else {
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
                  v_x,4);
               }
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            if (ll != 0) {
               ist = 1;
            }
         }
/* loop over remaining particles in this direction */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled      */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+noff)] - 1;
         v_it0 = _mm512_set1_epi32(nn);
         ih += 1;
         j2 = ihole[2*(ih+noff)] - 1;
         v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order      */
/*       nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/*          j2 = ihole[2*(ih+noff)] - 1; */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
            v_mm = _mm512_add_epi32(v_mm,v_m2);
            v_mm = _mm512_add_epi32(v_mm,v_mm);
            v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
            v_is = _mm512_sub_epi32(v_is,v_1);
/*          j1 = npp - j - 1;                        */
/*          if (j1==nn) {                            */
/*             ii -= 1;                              */
/*             nn = ihole[2*(ii+(ntmax+1)*l)] - 1;   */
/*          }                                        */
            kk = 0;
            for (ll = 0; ll < 16; ll++) {
               j1 = npp - j - ll - 1;
               if (j1==nn) {
                  ii -= 1;
                  nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
               }
               else {
                  ls[kk] = j1;
                  kk += 1;
               }
            }
            v_it = _mm512_load_epi32(ls);
            v_it0 = _mm512_set1_epi32(kk);
            msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
            v_is = _mm512_add_epi32(v_is,v_m3);
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/*             ppart[j2+nppmx*i+npoff]               */
/*             = ppart[j1+nppmx*i+npoff];            */
               if (kk==16) {
                  v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
                  _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
               }
               else {
                  v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
                        (float *)ppart,4);
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
                  v_x,4);
               }
               v_is = _mm512_add_epi32(v_is,v_m1);
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            ih += kk;
/* holes with locations great than npp-ip do not need to be filled */
         }
/* loop over remaining particles */
         if (nps < ip) {
            nn = ihole[2*(ii+noff)] - 1;
            j2 = ihole[2*(ih+noff)] - 1;
         }
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+noff)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[l] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncpporderf3lt(float ppart[], float ppbuff[], int kpic[],
                     int ncl[], int ihole[], int idimp, int nppmx,
                     int mx1, int my1, int mz1, int npbmx, int ntmax,
                     int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   the algorithm has 2 steps. first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   ckncgppushf3lt subroutine.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppbuff[m][i][n] = i co-ordinate of particle n in tile m
   kpic[m] = number of particles in tile m
   ncl[m][i] = number of particles going to destination i, tile m
   ihole[m][:][0] = location of hole in array left by departing particle
   ihole[m][:][1] = direction destination of particle leaving hole
   all for tile m
   ihole[m][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 6
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mz1 = (system length in z direction - 1)/mz + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart, ppbuff need to be 64 byte aligned
   nppmx, npbmx need to be a multiple of 16
local data */ int mxy1, mxyz1, noff, npp, npoff, nps, nboff, ncoff; int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll; int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr; int ks[26]; __m512i v_it, v_0, v_1; __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff; __m512 v_x, v_zero; __mmask16 msk1; __attribute__((aligned(64))) unsigned int ls[32], lm[32]; mxy1 = mx1*my1; mxyz1 = mxy1*mz1; v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_zero = _mm512_setzero_ps(); /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ msk1 = _mm512_int2mask(1023); v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0); v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0); #pragma omp parallel for \ private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \ v_it0,v_ioff,ls,lm) for (l = 0; l < mxyz1; l++) { npoff = idimp*nppmx*l; nboff = idimp*npbmx*l; noff = (ntmax+1)*l; /* find address offset for ordered ppbuff array */ /* isum = 0; */ /* for (j = 0; j < 26; j++) { */ /* ist = ncl[j+26*l]; */ /* ncl[j+26*l] = isum; */ /* isum += ist; */ /* } */ /* perform exclusive prefix scan */ /* load 26 data elements into 32 length vector with zero padding */ mm = 26*l; v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]); _mm512_store_epi32(ls,v_it); v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]); v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]); _mm512_store_epi32(&ls[16],v_is); v_ioff = _mm512_setzero_epi32(); /* vector loop over elements in blocks of 16 */ for (j = 0; j < 32; j+=16) { /* load data */ v_it0 = _mm512_load_epi32(&ls[j]); /* first pass */ v_is = _mm512_shuffle_epi32(v_it0,177); v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690), v_it0,v_is); /* second pass */ v_is = _mm512_shuffle_epi32(v_it,80); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it, v_is); /* third pass */ v_is = _mm512_permutevar_epi32(v_m1,v_it); v_it = 
_mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it, v_is); /* fourth pass */ v_is = _mm512_permutevar_epi32(v_m2,v_it); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it, v_is); /* add offset */ v_it = _mm512_add_epi32(v_it,v_ioff); /* next offset */ if (j==0) { v_ioff = _mm512_shuffle_epi32(v_it,255); v_ioff = _mm512_permute4f128_epi32(v_ioff,255); } /* subtract for exclusive scan */ v_it = _mm512_sub_epi32(v_it,v_it0); /* write data */ _mm512_store_epi32(&ls[j],v_it); } nh = ihole[2*noff]; nps = 16*(nh/16); /* nps = (nh >> 4) << 4; */ ip = 0; /* loop over particles leaving tile in groups of 16 */ for (j = 0; j < nps; j+=16) { /* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */ /* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */ mm = 2*(j+1+noff); v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]); _mm512_store_epi32(lm,v_it); mm += 16; v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]); _mm512_store_epi32(&lm[16],v_is); /* buffer particles that are leaving tile, in direction order */ for (ll = 0; ll < 16; ll++) { j1 = lm[2*ll] - 1; ist = lm[1+2*ll]; ii = ls[ist-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ls[ist-1] = ii + 1; } } /* loop over remaining particles leaving tile */ for (j = nps; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+noff)] - 1; ist = ihole[1+2*(j+1+noff)]; ii = ls[ist-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ls[ist-1] = ii + 1; } /* store 26 data elements into ncl */ mm = 26*l; v_it = _mm512_load_epi32(ls); v_is = _mm512_load_epi32(&ls[16]); _mm512_packstorelo_epi32(&ncl[mm],v_it); _mm512_packstorehi_epi32(&ncl[mm+16],v_it); _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is); _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is); 
/* set error */ if (ip > 0) *irc = ncl[25+26*l]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0); v_m1 = _mm512_set1_epi32(nppmx); #pragma omp parallel for \ private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \ lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \ v_npp,v_x,msk1,ks,ls) for (l = 0; l < mxyz1; l++) { npp = kpic[l]; npoff = idimp*nppmx*l; noff = (ntmax+1)*l; v_m2 = _mm512_set1_epi32(noff+1); v_m3 = _mm512_set1_epi32(npoff); kz = l/mxy1; k = l - mxy1*kz; /* loop over tiles in z, assume periodic boundary conditions */ lk = kz*mxy1; /* find tile behind */ ll = kz - 1; if (ll < 0) ll += mz1; ll = ll*mxy1; /* find tile in front */ lr = kz + 1; if (lr >= mz1) lr -= mz1; lr = lr*mxy1; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1 ; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk + lk; ks[1] = kxl + kk + lk; ks[2] = kx + kr + lk; ks[3] = kxr + kr + lk; ks[4] = kxl + kr + lk; ks[5] = kx + kl + lk; ks[6] = kxr + kl + lk; ks[7] = kxl + kl + lk; ks[8] = kx + kk + lr; ks[9] = kxr + kk + lr; ks[10] = kxl + kk + lr; ks[11] = kx + kr + lr; ks[12] = kxr + kr + lr; ks[13] = kxl + kr + lr; ks[14] = kx + kl + lr; ks[15] = kxr + kl + lr; ks[16] = kxl + kl + lr; ks[17] = kx + kk + ll; ks[18] = kxr + kk + ll; ks[19] = kxl + kk + ll; ks[20] = kx + kr + ll; ks[21] = kxr + kr + ll; ks[22] = kxl + kr + ll; ks[23] = kx + kl + ll; ks[24] = kxr + kl + ll; ks[25] = kxl + kl + ll; /* loop over directions */ nh = ihole[2*noff]; 
ncoff = 0; ih = 0; ist = 0; j1 = 0; v_it0 = _mm512_set1_epi32(nh); v_is = _mm512_add_epi32(v_m2,v_it0); v_it0 = _mm512_sub_epi32(v_ioff,v_it0); v_npp = _mm512_set1_epi32(npp); for (ii = 0; ii < 26; ii++) { nboff = idimp*npbmx*ks[ii]; if (ii > 0) ncoff = ncl[ii-1+26*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+26*ks[ii]] - ncoff; /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in this direction in groups of 16 */ for (j = 0; j < nps; j+=16) { /* insert incoming particles into holes */ /* ih += 1; */ /* if (ih <= nh) { */ /* j1 = ihole[2*(ih+noff)] - 1; */ /* } */ /* place overflow at end of array */ /* else { */ /* j1 = npp; */ /* npp += 1; */ /* } */ v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0); msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm); v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm, v_npp); v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm, (int *)ihole,4); v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1); ih += 16; nn = ih - nh; if (nn > 0) { nn = nn < 16 ? 
nn : 16; npp += nn; } msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT); ll = _mm512_mask2int(_mm512_knot(msk1)); v_it = _mm512_add_epi32(v_it,v_m3); for (i = 0; i < idimp; i++) { /* if (j1 < nppmx) */ /* ppart[j1+nppmx*i+npoff] */ /* = ppbuff[j+ncoff+npbmx*i+nboff]; */ mm = j + ncoff + npbmx*i + nboff; v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]); v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]); if (ll==0) { _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4); } else { _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it, v_x,4); } v_it = _mm512_add_epi32(v_it,v_m1); } if (ll != 0) { ist = 1; } } /* loop over remaining particles in this direction */ for (j = nps; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*l)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[j1+nppmx*i+npoff] = ppbuff[j+ncoff+npbmx*i+nboff]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ /* holes with locations great than npp-ip do not need to be filled */ if (ih < nh) { ip = nh - ih; ii = nh; nn = ihole[2*(ii+noff)] - 1; v_it0 = _mm512_set1_epi32(nn); ih += 1; j2 = ihole[2*(ih+noff)] - 1; v_m2 = _mm512_sub_epi32(v_m2,v_1); /* move particles from end into remaining holes */ /* holes are processed in increasing order */ /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in groups of 16 */ for (j = 0; j < nps; j+=16) { /* j2 = ihole[2*(ih+noff)] - 1; */ v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff); v_mm = _mm512_add_epi32(v_mm,v_m2); v_mm = _mm512_add_epi32(v_mm,v_mm); v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4); v_is = _mm512_sub_epi32(v_is,v_1); /* j1 = npp - j - 1; */ /* if (j1==nn) { */ /* ii -= 1; */ /* nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */ /* } */ kk = 0; for (ll = 0; ll < 16; ll++) { j1 = npp - j - ll - 1; if (j1==nn) { ii 
-= 1; nn = ihole[2*(ii+(ntmax+1)*l)] - 1; } else { ls[kk] = j1; kk += 1; } } v_it = _mm512_load_epi32(ls); v_it0 = _mm512_set1_epi32(kk); msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT); v_is = _mm512_add_epi32(v_is,v_m3); v_it = _mm512_add_epi32(v_it,v_m3); for (i = 0; i < idimp; i++) { /* ppart[j2+nppmx*i+npoff] */ /* = ppart[j1+nppmx*i+npoff]; */ if (kk==16) { v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4); _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4); } else { v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it, (float *)ppart,4); _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is, v_x,4); } v_is = _mm512_add_epi32(v_is,v_m1); v_it = _mm512_add_epi32(v_it,v_m1); } ih += kk; /* holes with locations great than npp-ip do not need to be filled */ } /* loop over remaining particles */ if (nps < ip) { nn = ihole[2*(ii+noff)] - 1; j2 = ihole[2*(ih+noff)] - 1; } for (j = nps; j < ip; j++) { j1 = npp - j - 1; if (j1==nn) { ii -= 1; nn = ihole[2*(ii+noff)] - 1; } else { for (i = 0; i < idimp; i++) { ppart[j2+nppmx*i+npoff] = ppart[j1+nppmx*i+npoff]; } ih += 1; j2 = ihole[2*(ih+(ntmax+1)*l)] - 1; } } npp -= ip; } kpic[l] = npp; } return; } /*--------------------------------------------------------------------*/ void cknccguard3l(float fxyz[], int nx, int ny, int nz, int nxe, int nye, int nze) { /* replicate extended periodic vector field fxyz linear interpolation nx/ny/nz = system length in x/y direction nxe = first dimension of field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 nze = third dimension of field arrays, must be >= nz+1 requires KNC, fxyz needs to be 64 byte aligned nxe needs to be a multiple of 4 local data */ #define N 4 int j, k, l, nxs, nxyen, ll; nxs = 4*(nx/4); nxyen = N*nxe*nye; /* copy edges of extended field */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,ll) for (l = 0; l < nz; l++) { ll = nxyen*l; for (k = 0; k < ny; k++) { fxyz[N*nx+N*nxe*k+ll] = fxyz[N*nxe*k+ll]; 
fxyz[1+N*nx+N*nxe*k+ll] = fxyz[1+N*nxe*k+ll]; fxyz[2+N*nx+N*nxe*k+ll] = fxyz[2+N*nxe*k+ll]; } /* vector loop over elements in blocks of 4 */ for (j = 0; j < nxs; j+=4) { _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+ll], _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+ll])); } /* loop over remaining elements */ for (j = nxs; j < nx; j++) { fxyz[N*j+N*nxe*ny+ll] = fxyz[N*j+ll]; fxyz[1+N*j+N*nxe*ny+ll] = fxyz[1+N*j+ll]; fxyz[2+N*j+N*nxe*ny+ll] = fxyz[2+N*j+ll]; } fxyz[N*nx+N*nxe*ny+ll] = fxyz[ll]; fxyz[1+N*nx+N*nxe*ny+ll] = fxyz[1+ll]; fxyz[2+N*nx+N*nxe*ny+ll] = fxyz[2+ll]; } #pragma omp for \ private(j,k) for (k = 0; k < ny; k++) { /* vector loop over elements in blocks of 4 */ for (j = 0; j < nxs; j+=4) { _mm512_mask_store_ps(&fxyz[N*j+N*nxe*k+nxyen*nz], _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j+N*nxe*k])); } /* loop over remaining elements */ for (j = nxs; j < nx; j++) { fxyz[N*j+N*nxe*k+nxyen*nz] = fxyz[N*j+N*nxe*k]; fxyz[1+N*j+N*nxe*k+nxyen*nz] = fxyz[1+N*j+N*nxe*k]; fxyz[2+N*j+N*nxe*k+nxyen*nz] = fxyz[2+N*j+N*nxe*k]; } fxyz[N*nx+N*nxe*k+nxyen*nz] = fxyz[N*nxe*k]; fxyz[1+N*nx+N*nxe*k+nxyen*nz] = fxyz[1+N*nxe*k]; fxyz[2+N*nx+N*nxe*k+nxyen*nz] = fxyz[2+N*nxe*k]; } } /* vector loop over elements in blocks of 4 */ for (j = 0; j < nxs; j+=4) { _mm512_mask_store_ps(&fxyz[N*j+N*nxe*ny+nxyen*nz], _mm512_int2mask(30583),_mm512_load_ps(&fxyz[N*j])); } /* loop over remaining elements */ for (j = nxs; j < nx; j++) { fxyz[N*j+N*nxe*ny+nxyen*nz] = fxyz[N*j]; fxyz[1+N*j+N*nxe*ny+nxyen*nz] = fxyz[1+N*j]; fxyz[2+N*j+N*nxe*ny+nxyen*nz] = fxyz[2+N*j]; } fxyz[N*nx+N*nxe*ny+nxyen*nz] = fxyz[0]; fxyz[1+N*nx+N*nxe*ny+nxyen*nz] = fxyz[1]; fxyz[2+N*nx+N*nxe*ny+nxyen*nz] = fxyz[2]; return; #undef N } /*--------------------------------------------------------------------*/ void ckncaguard3l(float q[], int nx, int ny, int nz, int nxe, int nye, int nze) { /* accumulate extended periodic scalar field q linear interpolation nx/ny/nz = system length in x/y direction nxe = first dimension of 
field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 nze = third dimension of field arrays, must be >= nz+1 requires KNC, q needs to be 64 byte aligned nxe needs to be a multiple of 16 local data */ int j, k, l, nxs, nxye, ll; __m512 v_q; nxs = 16*(nx/16); nxye = nxe*nye; /* accumulate edges of extended field */ #pragma omp parallel { #pragma omp for \ private(j,k,l,ll,v_q) for (l = 0; l < nz; l++) { ll = nxye*l; for (k = 0; k < ny; k++) { q[nxe*k+ll] += q[nx+nxe*k+ll]; q[nx+nxe*k+ll] = 0.0; } /* vector loop over elements in blocks of 16 */ for (j = 0; j < nxs; j+=16) { v_q = _mm512_load_ps(&q[j+nxe*ny+ll]); v_q = _mm512_add_ps(_mm512_load_ps(&q[j+ll]),v_q); _mm512_store_ps(&q[j+ll],v_q); _mm512_store_ps(&q[j+nxe*ny+ll],_mm512_setzero_ps()); } /* loop over remaining elements */ for (j = nxs; j < nx; j++) { q[j+ll] += q[j+nxe*ny+ll]; q[j+nxe*ny+ll] = 0.0; } q[ll] += q[nx+nxe*ny+ll]; q[nx+nxe*ny+ll] = 0.0; } #pragma omp for \ private(j,k,v_q) for (k = 0; k < ny; k++) { /* vector loop over elements in blocks of 16 */ for (j = 0; j < nxs; j+=16) { v_q = _mm512_load_ps(&q[j+nxe*k+nxye*nz]); v_q = _mm512_add_ps(_mm512_load_ps(&q[j+nxe*k]),v_q); _mm512_store_ps(&q[j+nxe*k],v_q); _mm512_store_ps(&q[j+nxe*k+nxye*nz],_mm512_setzero_ps()); } /* loop over remaining elements */ for (j = nxs; j < nx; j++) { q[j+nxe*k] += q[j+nxe*k+nxye*nz]; q[j+nxe*k+nxye*nz] = 0.0; } q[nxe*k] += q[nx+nxe*k+nxye*nz]; q[nx+nxe*k+nxye*nz] = 0.0; } } /* vector loop over elements in blocks of 16 */ for (j = 0; j < nxs; j+=16) { v_q = _mm512_load_ps(&q[j+nxe*ny+nxye*nz]); v_q = _mm512_add_ps(_mm512_load_ps(&q[j]),v_q); _mm512_store_ps(&q[j],v_q); _mm512_store_ps(&q[j+nxe*ny+nxye*nz],_mm512_setzero_ps()); } /* loop over remaining elements */ for (j = nxs; j < nx; j++) { q[j] += q[j+nxe*ny+nxye*nz]; q[j+nxe*ny+nxye*nz] = 0.0; } q[0] += q[nx+nxe*ny+nxye*nz]; q[nx+nxe*ny+nxye*nz] = 0.0; return; } /*--------------------------------------------------------------------*/ 
void ckncmpois33(float complex q[], float complex fxyz[], int isign, float complex ffc[], float ax, float ay, float az, float affp, float *we, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d poisson's equation in fourier space for force/charge (or convolution of electric field over particle shape) with periodic boundary conditions. for isign = 0, output: ffc input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd for isign = -1, output: fxyz, we input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd approximate flop count is: 59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc) where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated equation used is: fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx], fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx], fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx], where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx], s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0, fx(ky=pi) = fy(ky=pi) = fx(ky=pi) = 0, fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0, fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0. 
q[l][k][j] = complex charge density for fourier mode (j,k,l) fxyz[l][k][j][0] = x component of complex force/charge fxyz[l][k][j][1] = y component of complex force/charge fxyz[l][k][j][2] = z component of complex force/charge all for fourier mode (j,k,l) cimag(ffc[l][k][j]) = finite-size particle shape factor s for fourier mode (j,k,l) creal(ffc[l][k][j]) = potential green's function g for fourier mode (j,k,l) ax/ay/az = half-width of particle in x/y/z direction affp = normalization constant = nx*ny*nz/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))* |q[kz][ky][kx]*s[kz][ky][kx]|**2) nx/ny/nz = system length in x/y/z direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nzv = third dimension of field arrays, must be >= nz nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh nzhd = third dimension of form factor array, must be >= nzh requires KNC, q, fxy, ffc need to be 64 byte aligned nxhd, nxvh need to be a multiple of 8 fxyz needs to have 4 components local data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6; float complex zero, zt1, zt2; double wp, sum1, sum2; __m512i v_j, v_it, v_perm; __m512 v_dnx, v_dny, v_dnz, v_dky, v_dkz, v_at1, v_at2, v_at3, v_at4; __m512 v_zero, v_zt1, v_zt2, v_zt3, v_zt4; __m512 a, b, c, d, e, f, g, h; __m512d v_wp, v_d; __attribute__((aligned(64))) double dd[8]; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 8*(nxh/8); itn = 1 > nxhs ? 
1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0); v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_perm = _mm512_set_epi32(15,14,11,10,7,6,3,2,13,12,9,8,5,4,1,0); if (isign != 0) goto L40; /* prepare form factor array */ for (l = 0; l < nzh; l++) { dkz = dnz*(float) l; ll = nxyhd*l; at1 = dkz*dkz; at2 = pow((dkz*az),2); for (k = 0; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; at3 = dky*dky + at1; at4 = pow((dky*ay),2) + at2; for (j = 0; j < nxh; j++) { dkx = dnx*(float) j; at5 = dkx*dkx + at3; at6 = exp(-0.5*(pow((dkx*ax),2) + at4)); if (at5==0.0) { ffc[j+kk+ll] = affp + 1.0*_Complex_I; } else { ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I; } } } } return; /* calculate force/charge and sum field energy */ L40: sum1 = 0.0; /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp, \ v_it,v_dky,v_dkz,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,a,b, \ c,d,e,f,g,h,v_d,v_wp,dd) \ reduction(+:sum1) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps( _mm512_set1_epi32(l),_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; wp = 0.0; v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); */ v_at1 = _mm512_load_ps((float 
*)&ffc[j+kk+ll]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* at3 = dky*at1; */ v_at3 = _mm512_mul_ps(v_dky,v_at1); /* at4 = dkz*at1; */ v_at4 = _mm512_mul_ps(v_dkz,v_at1); /* zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+kj+lj]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+k1+lj]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+kj+lj)] = at2*zt1; */ /* fxyz[1+4*(j+kj+lj)] = at3*zt1; */ /* fxyz[2+4*(j+kj+lj)] = at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = _mm512_mul_ps(v_at3,v_zt1); c = _mm512_mul_ps(v_at4,v_zt1); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c 
= (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+kj+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+kj+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+kj+lj)],d); /* fxyz[4*(j+k1+lj)] = at2*zt2; */ /* fxyz[1+4*(j+k1+lj)] = -at3*zt2; */ /* fxyz[2+4*(j+k1+lj)] = at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2)); c = _mm512_mul_ps(v_at4,v_zt2); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],d); /* wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) */ /* + q[j+k1+lj]*conjf(q[j+k1+lj])); */ v_zt3 = _mm512_mul_ps(v_zt1,v_zt1); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_mul_ps(v_at1,v_zt3); /* zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+kj+l1]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); 
/* zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+k1+l1]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+kj+l1)] = at2*zt1; */ /* fxyz[1+4*(j+kj+l1)] = at3*zt1; */ /* fxyz[2+4*(j+kj+l1)] = -at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = _mm512_mul_ps(v_at3,v_zt1); c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt1)); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],d); /* fxyz[4*(j+k1+l1)] = at2*zt2; */ /* fxyz[1+4*(j+k1+l1)] = -at3*zt2; */ /* fxyz[2+4*(j+k1+l1)] = -at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2)); c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2)); /* perform 4x16 transpose for fxyz field components */ e = 
_mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],d); /* wp += at1*(q[j+kj+l1]*conjf(q[j+kj+l1]) */ /* + q[j+k1+l1]*conjf(q[j+k1+l1])); */ v_zt4 = _mm512_mul_ps(v_zt1,v_zt1); v_zt4 = _mm512_add_ps(v_zt4,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_at1,v_zt4)); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); at2 = at1*dnx*(float) j; at3 = dky*at1; at4 = dkz*at1; zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; fxyz[4*(j+kj+lj)] = at2*zt1; fxyz[1+4*(j+kj+lj)] = at3*zt1; fxyz[2+4*(j+kj+lj)] = at4*zt1; fxyz[4*(j+k1+lj)] = at2*zt2; fxyz[1+4*(j+k1+lj)] = -at3*zt2; fxyz[2+4*(j+k1+lj)] = at4*zt2; zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; fxyz[4*(j+kj+l1)] = at2*zt1; 
fxyz[1+4*(j+kj+l1)] = at3*zt1; fxyz[2+4*(j+kj+l1)] = -at4*zt1; fxyz[4*(j+k1+l1)] = at2*zt2; fxyz[1+4*(j+k1+l1)] = -at3*zt2; fxyz[2+4*(j+k1+l1)] = -at4*zt2; at1 = at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) + q[j+k1+lj]*conjf(q[j+k1+lj]) + q[j+kj+l1]*conjf(q[j+kj+l1]) + q[j+k1+l1]*conjf(q[j+k1+l1])); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]); at3 = at1*dny*(float) k; at4 = dkz*at1; zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I; zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I; fxyz[4*(kj+lj)] = zero; fxyz[1+4*(kj+lj)] = at3*zt1; fxyz[2+4*(kj+lj)] = at4*zt1; fxyz[4*(k1+lj)] = zero; fxyz[1+4*(k1+lj)] = zero; fxyz[2+4*(k1+lj)] = zero; fxyz[4*(kj+l1)] = zero; fxyz[1+4*(kj+l1)] = at3*zt2; fxyz[2+4*(kj+l1)] = -at4*zt2; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[kj+lj]*conjf(q[kj+lj]) + q[kj+l1]*conjf(q[kj+l1])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); */ v_at1 = _mm512_load_ps((float *)&ffc[j+ll]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* at4 = dkz*at1; */ v_at4 = _mm512_mul_ps(v_dkz,v_at1); /* zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+lj]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+l1]); v_zt2 = 
_mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+lj)] = at2*zt1; */ /* fxyz[1+4*(j+lj)] = zero; */ /* fxyz[2+4*(j+lj)] = at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = v_zero; c = _mm512_mul_ps(v_at4,v_zt1); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c, 78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a, 78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255), b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+lj)],d); /* fxyz[4*(j+k1+lj)] = zero; */ /* fxyz[1+4*(j+k1+lj)] = zero; */ /* fxyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],v_zero); /* fxyz[4*(j+l1)] = at2*zt2; */ /* fxyz[1+4*(j+l1)] = zero; */ /* fxyz[2+4*(j+l1)] = -at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = v_zero; c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2)); /* perform 4x16 transpose 
for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c, 78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a, 78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255), b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+l1)],d); /* fxyz[4*(j+k1+l1)] = zero; */ /* fxyz[1+4*(j+k1+l1)] = zero; */ /* fxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero); /* wp += at1*(q[j+lj]*conjf(q[j+lj]) */ /* + q[j+l1]*conjf(q[j+l1])); */ v_zt3 = _mm512_mul_ps(v_zt1,v_zt1); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_mul_ps(v_at1,v_zt3); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); at2 = at1*dnx*(float) j; at4 = dkz*at1; zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; fxyz[4*(j+lj)] = at2*zt1; fxyz[1+4*(j+lj)] = zero; 
fxyz[2+4*(j+lj)] = at4*zt1; fxyz[4*(j+k1+lj)] = zero; fxyz[1+4*(j+k1+lj)] = zero; fxyz[2+4*(j+k1+lj)] = zero; fxyz[4*(j+l1)] = at2*zt2; fxyz[1+4*(j+l1)] = zero; fxyz[2+4*(j+l1)] = -at4*zt2; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(q[j+lj]*conjf(q[j+lj]) + q[j+l1]*conjf(q[j+l1])); wp += (double) at1; } /* mode numbers kx = 0, nx/2 */ at1 = crealf(ffc[ll])*cimagf(ffc[ll]); at4 = dkz*at1; zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I; fxyz[4*lj] = zero; fxyz[1+4*lj] = zero; fxyz[2+4*lj] = at4*zt1; fxyz[4*(k1+lj)] = zero; fxyz[1+4*(k1+lj)] = zero; fxyz[2+4*(k1+lj)] = zero; fxyz[4*l1] = zero; fxyz[1+4*l1] = zero; fxyz[2+4*l1] = zero; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[lj]*conjf(q[lj])); wp += (double) at1; /* sum1 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum1 += (wp + dd[0]); } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum2 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \ reduction(+:sum2) for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; for (j = 1; j < nxh; j++) { at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; fxyz[4*(j+kj)] = at2*zt1; fxyz[1+4*(j+kj)] = at3*zt1; fxyz[2+4*(j+kj)] = zero; fxyz[4*(j+k1)] = at2*zt2; fxyz[1+4*(j+k1)] = -at3*zt2; fxyz[2+4*(j+k1)] = zero; fxyz[4*(j+kj+l1)] = zero; fxyz[1+4*(j+kj+l1)] = zero; fxyz[2+4*(j+kj+l1)] = zero; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); wp += (double) at1; } sum2 += wp; } /* mode numbers kx = 0, nx/2 */ wp = 0.0; v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = 
crealf(ffc[kk])*cimagf(ffc[kk]); at3 = at1*dny*(float) k; zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I; fxyz[4*kj] = zero; fxyz[1+4*kj] = at3*zt1; fxyz[2+4*kj] = zero; fxyz[4*k1] = zero; fxyz[1+4*k1] = zero; fxyz[2+4*k1] = zero; fxyz[4*(kj+l1)] = zero; fxyz[1+4*(kj+l1)] = zero; fxyz[2+4*(kj+l1)] = zero; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[kj]*conjf(q[kj])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j])*cimagf(ffc[j]); */ v_at1 = _mm512_load_ps((float *)&ffc[j]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),v_zero, v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),v_zero); } /* fxyz[4*j] = at2*zt1; */ /* fxyz[1+4*j] = zero; */ /* fxyz[2+4*j] = zero; */ a = _mm512_mul_ps(v_at2,v_zt1); b = v_zero; c = v_zero; /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),v_zero, 177); b = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),v_zero, 177); d = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*j],a); _mm512_store_ps((float *)&fxyz[8+4*j],b); _mm512_store_ps((float *)&fxyz[16+4*j],c); _mm512_store_ps((float *)&fxyz[24+4*j],d); /* fxyz[4*(j+k1)] = zero; */ /* fxyz[1+4*(j+k1)] = zero; */ /* fxyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1)],v_zero); /* fxyz[4*(j+l1)] = zero; */ /* fxyz[1+4*(j+l1)] = zero; */ /* fxyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+l1)],v_zero); /* fxyz[4*(j+k1+l1)] = zero; */ /* fxyz[1+4*(j+k1+l1)] = zero; */ /* fxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero); /* wp += at1*(q[j]*conjf(q[j])); */ v_zt3 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_zt1,v_zt1)); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j])*cimagf(ffc[j]); at2 = at1*dnx*(float) j; zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; fxyz[4*j] = at2*zt1; fxyz[1+4*j] = zero; fxyz[2+4*j] = zero; fxyz[4*(j+k1)] = zero; fxyz[1+4*(j+k1)] = zero; fxyz[2+4*(j+k1)] = zero; fxyz[4*(j+l1)] = zero; fxyz[1+4*(j+l1)] = zero; fxyz[2+4*(j+l1)] = zero; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = 
zero;
      at1 = at1*(q[j]*conjf(q[j]));
      wp += (double) at1;
   }
/* zero out the kx = 0 and kx = nx/2 force modes on the boundary rows */
   fxyz[0] = zero;
   fxyz[1] = zero;
   fxyz[2] = zero;
   fxyz[4*k1] = zero;
   fxyz[1+4*k1] = zero;
   fxyz[2+4*k1] = zero;
   fxyz[4*l1] = zero;
   fxyz[1+4*l1] = zero;
   fxyz[2+4*l1] = zero;
   fxyz[4*(k1+l1)] = zero;
   fxyz[1+4*(k1+l1)] = zero;
   fxyz[2+4*(k1+l1)] = zero;
/* sum2 += wp; */
/* horizontally reduce the 8 double-precision partial sums in v_wp */
   _mm512_store_pd(&dd[0],v_wp);
   for (j = 1; j < 8; j++) {
      dd[0] += dd[j];
   }
   sum2 += (wp + dd[0]);
/* *we = wp*((float) nx)*((float) ny)*((float) nz); */
   *we = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz);
   return;
}

/*--------------------------------------------------------------------*/
void ckncfft3rmxy(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nzi, int nzp, int nxhd, int nyd, int nzd,
                  int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of a three dimensional real to
   complex fast fourier transform and its inverse, for a subset of z,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
   indx/indy/indz = exponent which determines length in x/y/z direction,
   where nx=2**indx, ny=2**indy, nz=2**indz
   if isign = -1, an inverse fourier transform in x and y is performed
   f[i][m][n] = (1/nx*ny*nz)*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)*
         exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, a forward fourier transform in x and y is performed
   f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)*
         exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nzi = initial z index used
   nzp = number of z indices used
   nxhd = first dimension of f
   nyd,nzd = second and third dimensions of f
   nxhyzd = maximum of (nx/2,ny,nz)
   nxyzhd = maximum of (nx,ny,nz)/2
   fourier coefficients are stored as follows:
   f[l][k][j] = real, imaginary part of mode j,k,l
   where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
   f[l][k][0] = real, imaginary part of mode nx/2,k,l,
   where ny/2+1 <= k < ny and 0 <= l < nz, and
   f[l][0][0] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0] = real, imaginary part of mode nx/2,ny/2,l,
   where nz/2+1 <= l < nz, and
   imag(f[0][0][0]) = real part of mode nx/2,0,0
   imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
   imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
   imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
   using jpl storage convention, as described in:
   E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
   Three-Dimensional Fast Fourier Transform Algorithms for
   Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC
   Report 217-50, December 1993.
   requires KNC, f needs to be 64 byte aligned
   nxhd needs to be a multiple of 8
   NOTE(review): uses first-generation Xeon Phi (KNC) intrinsics
   (loadunpack/packstore, permutevar_epi32, int2mask) which are not part
   of AVX-512 -- presumably builds only with a KNC toolchain; confirm.
   written by viktor k. decyk, ucla
local data                                                            */
   int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh;
   int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd;
   int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
   int nss, nxhs, nxhhs, itn;
   float ani;
   float complex t1, t2, t3;
   __m512i v_j, v_kmr, v_m, v_n, v_it;
   __m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani;
/* v_j: per-lane butterfly indices, each index duplicated for the
   real/imaginary float pair of one complex element */
   v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0);
   if (isign==0)
      return;
   indx1 = indx - 1;
   ndx1yz = indx1 > indy ? indx1 : indy;
   ndx1yz = ndx1yz > indz ? ndx1yz : indz;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nyh = ny/2;
   nz = 1L<<indz;
   nxyz = nx > ny ? nx : ny;
   nxyz = nxyz > nz ? nxyz : nz;
   nxhyz = 1L<<ndx1yz;
   nzt = nzi + nzp - 1;
   nxhyd = nxhd*nyd;
/* nxhs/nxhhs: loop counts rounded down to a multiple of the 8-complex
   vector width; scalar loops handle the remainder */
   nxhs = 8*(nxh/8);
   nxhhs = 8*(nxhh/8);
   itn = 1 > nxhhs ? 1 : nxhhs;
   v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0);
/* v_n: permutation that reverses the order of 8 complex elements */
   v_n = _mm512_set_epi32(1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14);
   v_zero = _mm512_setzero_ps();
   v_t1 = _mm512_setzero_ps();
   v_t2 = _mm512_setzero_ps();
   v_t3 = _mm512_setzero_ps();
   v_t4 = _mm512_setzero_ps();
   if (isign > 0)
      goto L180;
/* inverse fourier transform */
   nrxb = nxhyz/nxh;
   nrx = nxyz/nxh;
   nryb = nxhyz/ny;
   nry = nxyz/ny;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3, \
v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5)
   for (n = nzi-1; n < nzt; n++) {
      nn = nxhyd*n;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            for (i = 0; i < ny; i++) {
               joff = nxhd*i + nn;
               t1 = f[j1+joff];
               f[j1+joff] = f[j+joff];
               f[j+joff] = t1;
            }
         }
      }
/* first transform in x */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         nss = 8*(ns/8);
         v_kmr = _mm512_set1_epi32(2*kmr);
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (i = 0; i < ny; i++) {
               joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
/* complex multiply pattern used throughout: shuffle 160 (0xA0)
   duplicates real parts, 245 (0xF5) duplicates imaginary parts,
   177 (0xB1) swaps the real/imaginary pair; mask 21845 (0x5555)
   negates the even (real) lanes, 43690 (0xAAAA) the odd lanes */
               for (j = 0; j < nss; j+=8) {
/*                t1 = sct[kmr*j]; */
                  v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
                  v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
                  v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/*                t2 = t1*f[j+k2+joff]; */
                  v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[j+k2+joff] = f[j+k1+joff] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/*                f[j+k1+joff] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[j+k1+joff],v_t4);
               }
/* loop over remaining elements */
               for (j = nss; j < ns; j++) {
                  t1 = sct[kmr*j];
                  t2 = t1*f[j+k2+joff];
                  f[j+k2+joff] = f[j+k1+joff] - t2;
                  f[j+k1+joff] += t2;
               }
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
      kmr = nxyz/nx;
      ani = 0.5/(((float) nx)*((float) ny)*((float) nz));
      v_ani = _mm512_set1_ps(ani);
      v_kmr = _mm512_set1_epi32(2*kmr);
      for (k = 0; k < ny; k++) {
         joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
         for (j = 0; j < nxhhs; j+=8) {
/*          t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */
            v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
            v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
            v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
            v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845),
                   v_zero,v_t3);
            v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/*          t2 = conjf(f[nxh-j+joff]); */
/* unaligned load of 8 complex elements ending at f[nxh-j+joff] */
            v_t2 = _mm512_loadunpacklo_ps(v_t2,
                   (float *)&f[nxh-j+joff-7]);
            v_t2 = _mm512_loadunpackhi_ps(v_t2,
                   (float *)&f[nxh-j+joff+1]);
/* reverse data */
            v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
            v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
                   v_zero,v_t2);
/*          t1 = f[j+joff] + t2; */
            v_t4 = _mm512_load_ps((float *)&f[j+joff]);
            v_t1 = _mm512_add_ps(v_t4,v_t2);
/*          t2 = (f[j+joff] - t2)*t3; */
            v_t2 = _mm512_sub_ps(v_t4,v_t2);
            v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160);
            v_t5 = _mm512_mul_ps(v_t2,v_t5);
            v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
            v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245);
            v_t4 = _mm512_mul_ps(v_t2,v_t4);
            v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                   v_zero,v_t4);
            v_t2 = _mm512_add_ps(v_t5,v_t4);
/*          f[j+joff] = ani*(t1 + t2); */
            v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2));
/*          f[nxh-j+joff] = ani*conjf(t1 - t2); */
            v_t4 = _mm512_sub_ps(v_t1,v_t2);
            v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690),
                   v_zero,v_t4);
            v_t4 = _mm512_mul_ps(v_ani,v_t4);
/* reverse data */
            v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4);
/* j==0 case: element 0 (mode kx=0/nx/2) is handled by the scalar
   cleanup below, so masked stores skip the overlapping lanes */
            if (j==0) {
               _mm512_mask_store_ps((float *)&f[j+joff],
                  _mm512_int2mask(65532),v_t3);
               _mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7],
                  _mm512_int2mask(16383),v_t4);
               _mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1],
                  _mm512_int2mask(16383),v_t4);
            }
            else {
               _mm512_store_ps((float *)&f[j+joff],v_t3);
               _mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4);
               _mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4);
            }
         }
/* loop over remaining elements */
         for (j = itn; j < nxhh; j++) {
            t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
            t2 = conjf(f[nxh-j+joff]);
            t1 = f[j+joff] + t2;
            t2 = (f[j+joff] - t2)*t3;
            f[j+joff] = ani*(t1 + t2);
            f[nxh-j+joff] = ani*conjf(t1 - t2);
         }
      }
      ani = 2.0*ani;
      for (k = 0; k < ny; k++) {
         joff = nxhd*k + nn;
         f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
         f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
                   + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
      }
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         joff = nxhd*k + nn;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/*             t1 = f[i+k1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/*             f[i+k1] = f[i+joff]; */
               v_t2 = _mm512_load_ps((float *)&f[i+joff]);
               _mm512_store_ps((float *)&f[i+k1],v_t2);
/*             f[i+joff] = t1; */
               _mm512_store_ps((float *)&f[i+joff],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+k1];
               f[i+k1] = f[i+joff];
               f[i+joff] = t1;
            }
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1) + nn;
               j2 = nxhd*(j + k2) + nn;
               t1 = sct[kmr*j];
/* broadcast twiddle factor t1 to all 8 complex lanes */
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
/* vector loop over elements in blocks of 8 */
               for (i = 0; i < nxhs; i+=8) {
/*                t2 = t1*f[i+j2]; */
                  v_t2 = _mm512_load_ps((float *)&f[i+j2]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[i+j2] = f[i+j1] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+j1]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+j2],v_t4);
/*                f[i+j1] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+j1],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+j2];
                  f[i+j2] = f[i+j1] - t2;
                  f[i+j1] += t2;
               }
            }
         }
         ns = ns2;
      }
/* unscramble modes kx = 0, nx/2 */
      for (k = 1; k < nyh; k++) {
         joff = nxhd*k;
         k1 = nxhd*ny - joff + nn;
         joff += nn;
         t1 = f[k1];
         f[k1] = 0.5*(cimagf(f[joff] + t1)
                 + crealf(f[joff] - t1)*_Complex_I);
         f[joff] = 0.5*(crealf(f[joff] + t1)
                   + cimagf(f[joff] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L180: nryb = nxhyz/ny;
   nry = nxyz/ny;
   nrxb = nxhyz/nxh;
   nrx = nxyz/nxh;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3,v_it, \
v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5)
   for (n = nzi-1; n < nzt; n++) {
      nn = nxhyd*n;
/* scramble modes kx = 0, nx/2 */
      for (k = 1; k < nyh; k++) {
         joff = nxhd*k;
         k1 = nxhd*ny - joff + nn;
         joff += nn;
         t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
         f[k1] = conjf(f[joff] - t1);
         f[joff] += t1;
      }
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         joff = nxhd*k + nn;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1 + nn;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/*             t1 = f[i+k1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+k1]);
/*             f[i+k1] = f[i+joff]; */
               v_t2 = _mm512_load_ps((float *)&f[i+joff]);
               _mm512_store_ps((float *)&f[i+k1],v_t2);
/*             f[i+joff] = t1; */
               _mm512_store_ps((float *)&f[i+joff],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+k1];
               f[i+k1] = f[i+joff];
               f[i+joff] = t1;
            }
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1) + nn;
               j2 = nxhd*(j + k2) + nn;
/* forward transform uses conjugated twiddle factors */
               t1 = conjf(sct[kmr*j]);
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
/* vector loop over elements in blocks of 8 */
               for (i = 0; i < nxhs; i+=8) {
/*                t2 = t1*f[i+j2]; */
                  v_t2 = _mm512_load_ps((float *)&f[i+j2]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[i+j2] = f[i+j1] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+j1]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+j2],v_t4);
/*                f[i+j1] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+j1],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+j2];
                  f[i+j2] = f[i+j1] - t2;
                  f[i+j1] += t2;
               }
            }
         }
         ns = ns2;
      }
/* scramble coefficients */
      kmr = nxyz/nx;
      v_kmr = _mm512_set1_epi32(2*kmr);
      for (k = 0; k < ny; k++) {
         joff = nxhd*k + nn;
/* vector loop over elements in blocks of 8 */
         for (j = 0; j < nxhhs; j+=8) {
/*          t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */
            v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
            v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
            v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4);
            v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177);
/*          t2 = conjf(f[nxh-j+joff]); */
            v_t2 = _mm512_loadunpacklo_ps(v_t2,
                   (float *)&f[nxh-j+joff-7]);
            v_t2 = _mm512_loadunpackhi_ps(v_t2,
                   (float *)&f[nxh-j+joff+1]);
/* reverse data */
            v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2);
            v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690),
                   v_zero,v_t2);
/*          t1 = f[j+joff] + t2; */
            v_t4 = _mm512_load_ps((float
k2 = k1 + ns;
            for (i = 0; i < ny; i++) {
               joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
               for (j = 0; j < nss; j+=8) {
/*                t1 = conjf(sct[kmr*j]); */
/* mask 43690 (0xAAAA) negates the odd (imaginary) lanes, i.e. conjf */
                  v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
                  v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
                  v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
                  v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
                         v_zero,v_t1);
/*                t2 = t1*f[j+k2+joff]; */
                  v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[j+k2+joff] = f[j+k1+joff] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/*                f[j+k1+joff] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[j+k1+joff],v_t4);
               }
/* loop over remaining elements */
               for (j = nss; j < ns; j++) {
                  t1 = conjf(sct[kmr*j]);
                  t2 = t1*f[j+k2+joff];
                  f[j+k2+joff] = f[j+k1+joff] - t2;
                  f[j+k1+joff] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncfft3rmz(float complex f[], int isign, int mixup[],
                 float complex sct[], int indx, int indy, int indz,
                 int nyi, int nyp, int nxhd, int nyd, int nzd,
                 int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of a three dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
   indx/indy/indz = exponent which determines length in x/y/z direction,
   where nx=2**indx, ny=2**indy, nz=2**indz
   if isign = -1, an inverse fourier transform in z is performed
   f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz))
   if isign = 1, a forward fourier transform in z is performed
   f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = first dimension of f
   nyd,nzd = second and third dimensions of f
   nxhyzd = maximum of (nx/2,ny,nz)
   nxyzhd = maximum of (nx,ny,nz)/2
   fourier coefficients are stored as follows:
   f[l][k][j] = real, imaginary part of mode j,k,l
   where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
   f[l][k][0] = real, imaginary part of mode nx/2,k,l,
   where ny/2+1 <= k < ny and 0 <= l < nz, and
   f[l][0][0] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0] = real, imaginary part of mode nx/2,ny/2,l,
   where nz/2+1 <= l < nz, and
   imag(f[0][0][0]) = real part of mode nx/2,0,0
   imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
   imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
   imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
   using jpl storage convention, as described in:
   E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
   Three-Dimensional Fast Fourier Transform Algorithms for
   Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC
   Report 217-50, December 1993.
   requires KNC, f needs to be 64 byte aligned
   nxhd needs to be a multiple of 8
   written by viktor k. decyk, ucla
local data                                                            */
   int indx1, ndx1yz, nx, nxh, ny, nyh;
   int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff;
   int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1;
/* nss is unused in this routine */
   int nss, nxhs;
   float complex t1, t2;
   __m512 v_zero, v_t1, v_t2, v_t3, v_t4;
   if (isign==0)
      return;
   indx1 = indx - 1;
   ndx1yz = indx1 > indy ? indx1 : indy;
   ndx1yz = ndx1yz > indz ? ndx1yz : indz;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nz = 1L<<indz;
   nzh = nz/2;
   nxyz = nx > ny ? nx : ny;
   nxyz = nxyz > nz ? nxyz : nz;
   nxhyz = 1L<<ndx1yz;
   nyt = nyi + nyp - 1;
   nxhyd = nxhd*nyd;
/* nxhs: x loop count rounded down to a multiple of the vector width */
   nxhs = 8*(nxh/8);
   v_zero = _mm512_setzero_ps();
   v_t1 = _mm512_setzero_ps();
   v_t2 = _mm512_setzero_ps();
   v_t3 = _mm512_setzero_ps();
   v_t4 = _mm512_setzero_ps();
   if (isign > 0)
      goto L90;
/* inverse fourier transform */
   nrzb = nxhyz/nz;
   nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/*             t1 = f[i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/*             f[i+i1] = f[i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[i+i0]);
               _mm512_store_ps((float *)&f[i+i1],v_t2);
/*             f[i+i0] = t1; */
               _mm512_store_ps((float *)&f[i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+i1];
               f[i+i1] = f[i+i0];
               f[i+i0] = t1;
            }
         }
      }
/* finally transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
               t1 = sct[kmr*j];
/* broadcast twiddle factor t1 to all 8 complex lanes */
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
/* complex multiply via lane shuffles: 160 (0xA0) duplicates real
   parts, 245 (0xF5) duplicates imaginary parts, 177 (0xB1) swaps the
   pair; mask 21845 (0x5555) negates the even (real) lanes */
               for (i = 0; i < nxhs; i+=8) {
/*                t2 = t1*f[i+i1]; */
                  v_t2 = _mm512_load_ps((float *)&f[i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[i+i1] = f[i+i0] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i1],v_t4);
/*                f[i+i0] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+i1];
                  f[i+i1] = f[i+i0] - t2;
                  f[i+i0] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         t1 = f[l1];
         f[l1] = 0.5*(cimagf(f[ll] + t1)
                 + crealf(f[ll] - t1)*_Complex_I);
         f[ll] = 0.5*(crealf(f[ll] + t1)
                 + cimagf(f[ll] - t1)*_Complex_I);
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd*nyh;
         i0 = i1 + ll;
         i1 += l1;
         t1 = f[i1];
         f[i1] = 0.5*(cimagf(f[i0] + t1)
                 + crealf(f[i0] - t1)*_Complex_I);
         f[i0] = 0.5*(crealf(f[i0] + t1)
                 + cimagf(f[i0] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L90: nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I;
         f[l1] = conjf(f[ll] - t1);
         f[ll] += t1;
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd*nyh;
         i0 = i1 + ll;
         i1 += l1;
         t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I;
         f[i1] = conjf(f[i0] - t1);
         f[i0] += t1;
      }
   }
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd*n;
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/*             t1 = f[i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/*             f[i+i1] = f[i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[i+i0]);
               _mm512_store_ps((float *)&f[i+i1],v_t2);
/*             f[i+i0] = t1; */
               _mm512_store_ps((float *)&f[i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+i1];
               f[i+i1] = f[i+i0];
               f[i+i0] = t1;
            }
         }
      }
/* first transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
/* forward transform uses conjugated twiddle factors */
               t1 = conjf(sct[kmr*j]);
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
               for (i = 0; i < nxhs; i+=8) {
/*                t2 = t1*f[i+i1]; */
                  v_t2 = _mm512_load_ps((float *)&f[i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[i+i1] = f[i+i0] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i1],v_t4);
/*                f[i+i0] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+i1];
                  f[i+i1] = f[i+i0] - t2;
                  f[i+i0] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncfft3rm3xy(float complex f[], int isign, int mixup[],
                   float complex sct[], int indx, int indy, int indz,
                   int nzi, int nzp, int nxhd, int nyd, int nzd,
                   int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of 3 three dimensional complex
   to real fast fourier transforms and their inverses, for a subset of
   z, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, three inverse fourier transforms in x and y are performed f[i][m][n][0:2] = (1/nx*ny*nz)*sum(f[i][k][j][0:2]* exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, three forward fourier transforms in x and y are performed f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nzi = initial z index used nzp = number of z indices used nxhd = second dimension of f nyd,nzd = third and fourth dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j][0:2] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. requires KNC, f needs to be 64 byte aligned nxhd need to be a multiple of 2 f needs to have 4 components written by viktor k. 
decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh; int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd4, nxhyd; int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff; int nss, nxhs, nxhhs, itn; float at1, at2, ani; float complex t1, t2, t3, t4; __m512i v_j, v_kmr, v_m, v_n, v_l, v_it; __m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani, v_half; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nzt = nzi + nzp - 1; nxhd4 = 4*nxhd; nxhyd = nxhd4*nyd; nxhs = 2*(nxh/2); nxhhs = 2*(nxhh/2); itn = 1 > nxhhs ? 1 : nxhhs; v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0); v_n = _mm512_set_epi32(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8); v_zero = _mm512_setzero_ps(); v_t1 = _mm512_setzero_ps(); v_t2 = _mm512_setzero_ps(); v_t3 = _mm512_setzero_ps(); v_t4 = _mm512_setzero_ps(); v_half = _mm512_set1_ps(0.5f); if (isign > 0) goto L230; /* inverse fourier transform */ nrxb = nxhyz/nxh; nrx = nxyz/nxh; nryb = nxhyz/ny; nry = nxyz/ny; v_l = _mm512_set_epi32(15,11,14,10,13,9,12,8,7,3,6,2,5,1,4,0); #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \ ani,t1,t2,t3,t4,v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* swap complex components */ for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(f[2+4*j+joff]); */ /* at2 = crealf(f[2+4*j+joff]); */ /* f[2+4*j+joff] = crealf(f[1+4*j+joff]) */ /* + crealf(f[3+4*j+joff])*_Complex_I; */ /* f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; */ /* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */ v_t1 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = 
(__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1); _mm512_store_ps((float *)&f[4*j+joff],v_t1); } /* loop over remaining elements */ for (j = nxhs; j < nxh; j++) { at1 = cimagf(f[2+4*j+joff]); at2 = crealf(f[2+4*j+joff]); f[2+4*j+joff] = crealf(f[1+4*j+joff]) + crealf(f[3+4*j+joff])*_Complex_I; f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; } } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm512_mask_loadunpacklo_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff]); v_t1 = _mm512_mask_loadunpackhi_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff+8]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm512_mask_loadunpacklo_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff]); v_t2 = _mm512_mask_loadunpackhi_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff+8]); _mm512_mask_packstorelo_ps((float *)&f[4*j1+joff], _mm512_int2mask(255),v_t2); _mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8], _mm512_int2mask(255),v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm512_mask_packstorelo_ps((float *)&f[4*j+joff], _mm512_int2mask(255),v_t1); _mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8], _mm512_int2mask(255),v_t1); } } } /* first transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 2*(ns/2); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = sct[kmr*j]; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float 
*)sct,4); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ /* t4 = t1*f[2+4*j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k2+joff],v_t4); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k1+joff],v_t4); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = sct[kmr*j]; t2 = t1*f[4*j+k2+joff]; t3 = t1*f[1+4*j+k2+joff]; t4 = t1*f[2+4*j+k2+joff]; f[4*j+k2+joff] = f[4*j+k1+joff] - t2; f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; f[4*j+k1+joff] += t2; f[1+4*j+k1+joff] += t3; f[2+4*j+k1+joff] += t4; } } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxyz/nx; ani = 0.5/(((float) nx)*((float) ny)*((float) nz)); v_ani = _mm512_set1_ps(ani); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845), v_zero,v_t3); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* for (jj = 0; jj < 3; jj++) { */ /* t2 = conjf(f[jj+4*(nxh-j)+joff]); 
*/ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff+8]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[jj+4*j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[jj+4*j+joff] = ani*(t1 + t2); */ v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2)); /* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */ /* } */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); v_t4 = _mm512_mul_ps(v_ani,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[4*j+joff], _mm512_int2mask(65280),v_t3); _mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff], _mm512_int2mask(255),v_t4); _mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8], _mm512_int2mask(255),v_t4); } else { _mm512_store_ps((float *)&f[4*j+joff],v_t3); _mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4); _mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+4*(nxh-j)+joff]); t1 = f[jj+4*j+joff] + t2; t2 = (f[jj+4*j+joff] - t2)*t3; f[jj+4*j+joff] = ani*(t1 + t2); f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); } } } /* ani = 2.0*ani; */ v_ani = _mm512_add_ps(v_ani,v_ani); for (k = 0; 
k < ny; k++) { joff = nxhd4*k + nn; /* for (jj = 0; jj < 3; jj++) { */ /* f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]); */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[4*nxhh+joff]); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero, v_t1); v_t1 = _mm512_mul_ps(v_ani,v_t1); _mm512_mask_store_ps((float *)&f[4*nxhh+joff], _mm512_int2mask(63),v_t1); /* f[jj+joff] = ani*((crealf(f[jj+joff]) */ /* + cimagf(f[jj+joff])) */ /* + (crealf(f[jj+joff]) */ /* - cimagf(f[jj+joff]))*_Complex_I); */ /* } */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2); v_t3 = _mm512_mul_ps(v_ani,v_t3); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t3); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd4*k1 + nn; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+k1]; */ /* t2 = f[1+4*i+k1]; */ /* t3 = f[2+4*i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+k1]); /* f[4*i+k1] = f[4*i+joff]; */ /* f[1+4*i+k1] = f[1+4*i+joff]; */ /* f[2+4*i+k1] = f[2+4*i+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+joff]); _mm512_store_ps((float *)&f[4*i+k1],v_t2); /* f[4*i+joff] = t1; */ /* f[1+4*i+joff] = t2; */ /* f[2+4*i+joff] = t3; */ _mm512_store_ps((float *)&f[4*i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+k1]; t2 = f[1+4*i+k1]; t3 = f[2+4*i+k1]; f[4*i+k1] = f[4*i+joff]; f[1+4*i+k1] = f[1+4*i+joff]; f[2+4*i+k1] = f[2+4*i+joff]; f[4*i+joff] = t1; f[1+4*i+joff] = t2; f[2+4*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = 
nxhd4*(j + k1) + nn; j2 = nxhd4*(j + k2) + nn; t1 = sct[kmr*j]; v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = t1*f[4*i+j2]; */ /* t3 = t1*f[1+4*i+j2]; */ /* t4 = t1*f[2+4*i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+j2] = f[4*i+j1] - t2; */ /* f[1+4*i+j2] = f[1+4*i+j1] - t3; */ /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j2],v_t4); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ /* f[2+4*i+j1] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+j2]; t3 = t1*f[1+4*i+j2]; t4 = t1*f[2+4*i+j2]; f[4*i+j2] = f[4*i+j1] - t2; f[1+4*i+j2] = f[1+4*i+j1] - t3; f[2+4*i+j2] = f[2+4*i+j1] - t4; f[4*i+j1] += t2; f[1+4*i+j1] += t3; f[2+4*i+j1] += t4; } } } ns = ns2; } /* unscramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd4*k; k1 = nxhd4*ny - joff + nn; joff += nn; /* for (jj = 0; jj < 3; jj++) { */ /* t1 = f[jj+k1]; */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[k1]); /* f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1) */ /* + crealf(f[jj+joff] - t1)*_Complex_I); */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(42),v_t2,v_t1); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21),v_t2,v_t1); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); v_t3 = _mm512_mul_ps(v_half,v_t3); _mm512_mask_store_ps((float 
*)&f[k1],_mm512_int2mask(63),v_t3); /* f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1) */ /* + cimagf(f[jj+joff] - t1)*_Complex_I); */ /* } */ v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t2,v_t1); v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(21),v_t2,v_t1); v_t2 = _mm512_mul_ps(v_half,v_t2); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),v_t2); } } return; /* forward fourier transform */ L230: nryb = nxhyz/ny; nry = nxyz/ny; nrxb = nxhyz/nxh; nrx = nxyz/nxh; v_l = _mm512_set_epi32(15,13,11,9,14,12,10,8,7,5,3,1,6,4,2,0); #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \ t1,t2,t3,t4,v_it,v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* scramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd4*k; k1 = nxhd4*ny - joff + nn; joff += nn; /* for (jj = 0; jj < 3; jj++) { */ /* t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[k1]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,177); /* f[jj+k1] = conjf(f[jj+joff] - t1); */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(63),v_t2,v_t1); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(42), v_zero,v_t3); _mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3); /* f[jj+joff] += t1; */ /* } */ v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(63),v_t2,v_t1); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t2); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd4*k1 + nn; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+k1]; */ /* t2 = f[1+4*i+k1]; */ /* t3 = f[2+4*i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+k1]); /* f[4*i+k1] = f[4*i+joff]; */ /* f[1+4*i+k1] = f[1+4*i+joff]; */ /* f[2+4*i+k1] = f[2+4*i+joff]; */ v_t2 = 
_mm512_load_ps((float *)&f[4*i+joff]); _mm512_store_ps((float *)&f[4*i+k1],v_t2); /* f[4*i+joff] = t1; */ /* f[1+4*i+joff] = t2; */ /* f[2+4*i+joff] = t3; */ _mm512_store_ps((float *)&f[4*i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+k1]; t2 = f[1+4*i+k1]; t3 = f[2+4*i+k1]; f[4*i+k1] = f[4*i+joff]; f[1+4*i+k1] = f[1+4*i+joff]; f[2+4*i+k1] = f[2+4*i+joff]; f[4*i+joff] = t1; f[1+4*i+joff] = t2; f[2+4*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd4*(j + k1) + nn; j2 = nxhd4*(j + k2) + nn; t1 = conjf(sct[kmr*j]); v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = t1*f[4*i+j2]; */ /* t3 = t1*f[1+4*i+j2]; */ /* t4 = t1*f[2+4*i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+j2] = f[4*i+j1] - t2; */ /* f[1+4*i+j2] = f[1+4*i+j1] - t3; */ /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j2],v_t4); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ /* f[2+4*i+j1] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+j2]; t3 = t1*f[1+4*i+j2]; t4 = t1*f[2+4*i+j2]; f[4*i+j2] = f[4*i+j1] - t2; f[1+4*i+j2] = f[1+4*i+j1] - t3; f[2+4*i+j2] = f[2+4*i+j1] - t4; f[4*i+j1] += t2; f[1+4*i+j1] += t3; f[2+4*i+j1] += t4; } } } ns 
= ns2; } /* scramble coefficients */ kmr = nxyz/nx; v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* for (jj = 0; jj < 3; jj++) { */ /* t2 = conjf(f[jj+4*(nxh-j)+joff]); */ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff+8]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[jj+4*j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[jj+4*j+joff] = t1 + t2; */ v_t3 = _mm512_add_ps(v_t1,v_t2); /* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */ /* } */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[4*j+joff], _mm512_int2mask(65280),v_t3); _mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff], _mm512_int2mask(255),v_t4); _mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8], _mm512_int2mask(255),v_t4); } else { _mm512_store_ps((float *)&f[4*j+joff],v_t3); _mm512_packstorelo_ps((float 
*)&f[4*(nxh-j-1)+joff],v_t4); _mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+4*(nxh-j)+joff]); t1 = f[jj+4*j+joff] + t2; t2 = (f[jj+4*j+joff] - t2)*t3; f[jj+4*j+joff] = t1 + t2; f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); } } } for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* for (jj = 0; jj < 3; jj++) { */ /* f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]); */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[4*nxhh+joff]); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero, v_t1); v_t1 = _mm512_add_ps(v_t1,v_t1); _mm512_mask_store_ps((float *)&f[4*nxhh+joff], _mm512_int2mask(63),v_t1); /* f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) */ /* + (crealf(f[jj+joff]) */ /* - cimagf(f[jj+joff]))*_Complex_I; */ /* } */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t3); } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm512_mask_loadunpacklo_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff]); v_t1 = _mm512_mask_loadunpackhi_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff+8]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm512_mask_loadunpacklo_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff]); v_t2 = _mm512_mask_loadunpackhi_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff+8]); _mm512_mask_packstorelo_ps((float 
*)&f[4*j1+joff], _mm512_int2mask(255),v_t2); _mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8], _mm512_int2mask(255),v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm512_mask_packstorelo_ps((float *)&f[4*j+joff], _mm512_int2mask(255),v_t1); _mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8], _mm512_int2mask(255),v_t1); } } } /* finally transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 2*(ns/2); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = conjf(sct[kmr*j]); */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690), v_zero,v_t1); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ /* t4 = t1*f[2+4*j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k2+joff],v_t4); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k1+joff],v_t4); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = conjf(sct[kmr*j]); t2 = t1*f[4*j+k2+joff]; t3 = 
t1*f[1+4*j+k2+joff];
                  t4 = t1*f[2+4*j+k2+joff];
                  f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
                  f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
                  f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
                  f[4*j+k1+joff] += t2;
                  f[1+4*j+k1+joff] += t3;
                  f[2+4*j+k1+joff] += t4;
               }
            }
         }
         ns = ns2;
      }
/* swap complex components */
      for (i = 0; i < ny; i++) {
         joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
/* the permute vector v_l applies, in registers, the same component */
/* shuffle written out in scalar form in the comments below         */
         for (j = 0; j < nxhs; j+=2) {
/*          f[3+4*j+joff] = cimagf(f[2+4*j+joff])               */
/*                          + cimagf(f[3+4*j+joff])*_Complex_I; */
/*          at1 = crealf(f[2+4*j+joff]);                        */
/*          f[2+4*j+joff] = cimagf(f[4*j+joff])                 */
/*                          + cimagf(f[1+4*j+joff])*_Complex_I; */
/*          at2 = crealf(f[1+4*j+joff]);                        */
/*          f[1+4*j+joff] = at1 + 0.0*_Complex_I;               */
/*          f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
            v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
            v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
            _mm512_store_ps((float *)&f[4*j+joff],v_t1);
         }
/* loop over remaining elements */
         for (j = nxhs; j < nxh; j++) {
            f[3+4*j+joff] = cimagf(f[2+4*j+joff])
                            + cimagf(f[3+4*j+joff])*_Complex_I;
            at1 = crealf(f[2+4*j+joff]);
            f[2+4*j+joff] = cimagf(f[4*j+joff])
                            + cimagf(f[1+4*j+joff])*_Complex_I;
            at2 = crealf(f[1+4*j+joff]);
            f[1+4*j+joff] = at1 + 0.0*_Complex_I;
            f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3z(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nyi, int nyp, int nxhd, int nyd, int nzd,
                  int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of 3 three dimensional complex
   to real fast fourier transforms and their inverses, for a subset of
   y, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1,  approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
   indx/indy/indz = exponent which determines length in x/y/z
   direction, where nx=2**indx, ny=2**indy, nz=2**indz
   if isign = -1, three inverse fourier transforms in z are performed
   f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz))
   if isign = 1, three forward fourier transforms in z are performed
   f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = second dimension of f
   nyd,nzd = third and fourth dimensions of f
   nxhyzd = maximum of (nx/2,ny,nz)
   nxyzhd = maximum of (nx,ny,nz)/2
   fourier coefficients are stored as follows:
   f[l][k][j][0:2] = real, imaginary part of mode j,k,l
   where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
   f[l][k][0][0:2], = real, imaginary part of mode nx/2,k,l,
   where ny/2+1 <= k < ny and 0 <= l < nz, and
   f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l,
   where nz/2+1 <= l < nz, and
   imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
   imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
   imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
   imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
   using jpl storage convention, as described in:
   E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
   Three-Dimensional Fast Fourier Transform Algorithms for
   Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC
   Report 217-50, December 1993.
   requires KNC, f needs to be 64 byte aligned
   nxhd need to be a multiple of 2
   f needs to have 4 components
   written by viktor k. decyk, ucla
local data */
   int indx1, ndx1yz, nx, nxh, ny, nyh;
   int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd4, nxhyd, ioff;
   int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr;
   int i0, i1;
   int nxhs;
   float complex t1, t2, t3, t4;
   __m512 v_zero, v_t1, v_t2, v_t3, v_t4;
   if (isign==0)
      return;
   indx1 = indx - 1;
   ndx1yz = indx1 > indy ? indx1 : indy;
   ndx1yz = ndx1yz > indz ? ndx1yz : indz;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nz = 1L<<indz;
   nzh = nz/2;
   nxyz = nx > ny ? nx : ny;
   nxyz = nxyz > nz ? nxyz : nz;
   nxhyz = 1L<<ndx1yz;
   nyt = nyi + nyp - 1;
   nxhd4 = 4*nxhd;
   nxhyd = nxhd4*nyd;
   nxhs = 2*(nxh/2);
   v_zero = _mm512_setzero_ps();
   v_t1 = _mm512_setzero_ps();
   v_t2 = _mm512_setzero_ps();
   v_t3 = _mm512_setzero_ps();
   v_t4 = _mm512_setzero_ps();
   if (isign > 0)
      goto L110;
/* inverse fourier transform */
   nrzb = nxhyz/nz;
   nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd4*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
            for (i = 0; i < nxhs; i+=2) {
/*             t1 = f[4*i+i1];   */
/*             t2 = f[1+4*i+i1]; */
/*             t3 = f[2+4*i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/*             f[4*i+i1] = f[4*i+i0];     */
/*             f[1+4*i+i1] = f[1+4*i+i0]; */
/*             f[2+4*i+i1] = f[2+4*i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
               _mm512_store_ps((float *)&f[4*i+i1],v_t2);
/*             f[4*i+i0] = t1; */
/*             f[1+4*i+i0] = t2; */
/*             f[2+4*i+i0] = t3; */
               _mm512_store_ps((float *)&f[4*i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[4*i+i1];
               t2 = f[1+4*i+i1];
               t3 = f[2+4*i+i1];
               f[4*i+i1] = f[4*i+i0];
               f[1+4*i+i1] = f[1+4*i+i0];
               f[2+4*i+i1] = f[2+4*i+i0];
               f[4*i+i0] = t1;
               f[1+4*i+i0] = t2;
               f[2+4*i+i0] = t3;
            }
         }
      }
/* finally transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
               t1 = sct[kmr*j];
/* replicate twiddle factor (re,im) pairs across all vector lanes */
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                                     crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
               for (i = 0; i < nxhs; i+=2) {
/*                t2 = t1*f[4*i+i1];   */
/*                t3 = t1*f[1+4*i+i1]; */
/*                t4 = t1*f[2+4*i+i1]; */
/* complex multiply by shuffles; mask 21845 = 0x5555 negates the    */
/* even (real) lanes so the following add forms re*re-im*im there   */
                  v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                                            v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[4*i+i1] = f[4*i+i0] - t2;     */
/*                f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/*                f[2+4*i+i1] = f[2+4*i+i0] - t4; */
                  v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i1],v_t4);
/*                f[4*i+i0] += t2; */
/*                f[1+4*i+i0] += t3; */
/*                f[2+4*i+i0] += t4; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[4*i+i1];
                  t3 = t1*f[1+4*i+i1];
                  t4 = t1*f[2+4*i+i1];
                  f[4*i+i1] = f[4*i+i0] - t2;
                  f[1+4*i+i1] = f[1+4*i+i0] - t3;
                  f[2+4*i+i1] = f[2+4*i+i0] - t4;
                  f[4*i+i0] += t2;
                  f[1+4*i+i0] += t3;
                  f[2+4*i+i0] += t4;
               }
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+l1];
            f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1)
                       + crealf(f[jj+ll] - t1)*_Complex_I);
            f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1)
                       + cimagf(f[jj+ll] - t1)*_Complex_I);
         }
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd4*nyh;
         i0 = i1 + ll;
         i1 += l1;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+i1];
            f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1)
                       + crealf(f[jj+i0] - t1)*_Complex_I);
            f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1)
                       + cimagf(f[jj+i0] - t1)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L110: nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I;
            f[jj+l1] = conjf(f[jj+ll] - t1);
            f[jj+ll] += t1;
         }
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd4*nyh;
         i0 = i1 + ll;
         i1 += l1;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I;
            f[jj+i1] = conjf(f[jj+i0] - t1);
            f[jj+i0] += t1;
         }
      }
   }
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd4*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
            for (i = 0; i < nxhs; i+=2) {
/*             t1 = f[4*i+i1];   */
/*             t2 = f[1+4*i+i1]; */
/*             t3 = f[2+4*i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/*             f[4*i+i1] = f[4*i+i0];     */
/*             f[1+4*i+i1] = f[1+4*i+i0]; */
/*             f[2+4*i+i1] = f[2+4*i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
               _mm512_store_ps((float *)&f[4*i+i1],v_t2);
/*             f[4*i+i0] = t1; */
/*             f[1+4*i+i0] = t2; */
/*             f[2+4*i+i0] = t3; */
               _mm512_store_ps((float *)&f[4*i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[4*i+i1];
               t2 = f[1+4*i+i1];
               t3 = f[2+4*i+i1];
               f[4*i+i1] = f[4*i+i0];
               f[1+4*i+i1] = f[1+4*i+i0];
               f[2+4*i+i1] = f[2+4*i+i0];
               f[4*i+i0] = t1;
               f[1+4*i+i0] = t2;
               f[2+4*i+i0] = t3;
            }
         }
      }
/* first transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
/* forward transform uses the conjugate twiddle factor */
               t1 = conjf(sct[kmr*j]);
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                                     crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
               for (i = 0; i < nxhs; i+=2) {
/*                t2 = t1*f[4*i+i1];   */
/*                t3 = t1*f[1+4*i+i1]; */
/*                t4 = t1*f[2+4*i+i1]; */
                  v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                                            v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/*                f[4*i+i1] = f[4*i+i0] - t2;     */
/*                f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/*                f[2+4*i+i1] = f[2+4*i+i0] - t4; */
                  v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i1],v_t4);
/*                f[4*i+i0] += t2; */
/*                f[1+4*i+i0] += t3; */
/*                f[2+4*i+i0] += t4; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[4*i+i1];
                  t3 = t1*f[1+4*i+i1];
                  t4 = t1*f[2+4*i+i1];
                  f[4*i+i1] = f[4*i+i0] - t2;
                  f[1+4*i+i1] = f[1+4*i+i0] - t3;
                  f[2+4*i+i1] = f[2+4*i+i0] - t4;
                  f[4*i+i0] += t2;
                  f[1+4*i+i0] += t3;
                  f[2+4*i+i0] += t4;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rmx(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for real to complex fft, with packed data */
/* dispatches the xy and z passes in the order required by the sign of
   isign: inverse does xy then z, forward does z then xy */
/* local data */
   int ny, nz;
   static int nyi = 1, nzi = 1;
/* calculate range of indices */
   ny = 1L<<indy;
   nz = 1L<<indz;
/* inverse fourier transform */
   if (isign < 0) {
/* perform xy fft */
      ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
/* perform z fft */
      ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                  nxhyzd,nxyzhd);
   }
/* forward fourier transform */
   else if (isign > 0) {
/* perform z fft */
      ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                  nxhyzd,nxyzhd);
/* perform xy fft */
      ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
}
   return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rm3(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for 3 3d real to complex ffts, with packed data */
/* dispatches the 3-component xy and z passes in the order required by
   the sign of isign: inverse does xy then z, forward does z then xy */
/* local data */
   int ny, nz;
   static int nyi = 1, nzi = 1;
/* calculate range of indices */
   ny = 1L<<indy;
   nz = 1L<<indz;
/* inverse fourier transform */
   if (isign < 0) {
/* perform xy fft */
      ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
                    nzd,nxhyzd,nxyzhd);
/* perform z fft */
      ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
   }
/* forward fourier transform */
   else if (isign > 0) {
/* perform z fft */
      ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
/* perform xy fft */
      ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,
                    nzd,nxhyzd,nxyzhd);
   }
   return;
}
/* Interfaces to Fortran */
/* each wrapper below dereferences the Fortran by-reference scalar
   arguments and forwards to the corresponding C routine; array and
   output arguments are passed through as pointers */
/*--------------------------------------------------------------------*/
void ckncgppush3lt_(float *ppart, float *fxyz, int *kpic, float *qbm,
                    float *dt, float *ek, int *idimp, int *nppmx,
                    int *nx, int *ny, int *nz, int *mx, int *my,
                    int *mz, int *nxv, int *nyv, int *nzv, int *mx1,
                    int *my1, int *mxyz1, int *ipbc) {
   ckncgppush3lt(ppart,fxyz,kpic,*qbm,*dt,ek,*idimp,*nppmx,*nx,*ny,*nz,
                 *mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
void ckncgppushf3lt_(float *ppart, float *fxyz, int *kpic, int *ncl,
                     int *ihole, float *qbm, float *dt, float *ek,
                     int *idimp, int *nppmx, int *nx, int *ny, int *nz,
                     int *mx, int *my, int *mz, int *nxv, int *nyv,
                     int *nzv, int *mx1, int *my1, int *mxyz1,
                     int *ntmax, int *irc) {
   ckncgppushf3lt(ppart,fxyz,kpic,ncl,ihole,*qbm,*dt,ek,*idimp,*nppmx,
                  *nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,
                  *mxyz1,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
void ckncgppost3lt_(float *ppart, float *q, int *kpic, float *qm,
                    int *nppmx, int *idimp, int *mx, int *my, int *mz,
                    int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
                    int *mxyz1) {
   ckncgppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
                 *nzv,*mx1,*my1,*mxyz1);
   return;
}
/*--------------------------------------------------------------------*/
void cknc2gppost3lt_(float *ppart, float *q, int *kpic, float *qm,
                     int *nppmx, int *idimp, int *mx, int *my, int *mz,
                     int *nxv, int *nyv, int *nzv, int *mx1, int *my1,
                     int *mxyz1) {
   cknc2gppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv,
                  *nzv,*mx1,*my1,*mxyz1);
   return;
}
/*--------------------------------------------------------------------*/
void ckncpporder3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                     int *ihole, int *idimp, int *nppmx, int *nx,
                     int *ny, int *nz, int *mx, int *my, int *mz,
                     int *mx1, int *my1, int *mz1, int *npbmx,
                     int *ntmax, int *irc) {
   ckncpporder3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz,
                  *mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
void ckncpporderf3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                      int *ihole, int *idimp, int *nppmx, int *mx1,
                      int *my1, int *mz1, int *npbmx, int *ntmax,
                      int *irc) {
   ckncpporderf3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
                   *mz1,*npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
void cknccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe,
                   int *nye, int *nze) {
   cknccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze);
   return;
}
/*--------------------------------------------------------------------*/
void ckncaguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe,
                   int *nye, int *nze) {
   ckncaguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze);
   return;
}
/*--------------------------------------------------------------------*/
void ckncmpois33_(float complex *q, float complex *fxyz, int *isign,
                  float complex *ffc, float *ax, float *ay, float *az,
                  float *affp, float *we, int *nx, int *ny, int *nz,
                  int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd,
                  int *nzhd) {
   ckncmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh,
               *nyv,*nzv,*nxhd,*nyhd,*nzhd);
   return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rmx_(float complex *f, int *isign, int *mixup,
                   float complex *sct, int *indx, int *indy, int *indz,
                   int *nxhd, int *nyd, int *nzd, int *nxhyzd,
                   int *nxyzhd) {
   ckncwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
                *nxhyzd,*nxyzhd);
   return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rm3_(float complex *f, int *isign, int *mixup,
                   float complex *sct, int *indx, int *indy, int *indz,
                   int *nxhd, int *nyd, int *nzd, int *nxhyzd,
                   int *nxyzhd) {
   ckncwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd,
                *nxhyzd,*nxyzhd);
   return;
}
Layer_GRU.h
//
//  smarties
//  Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
//  Distributed under the terms of the MIT license.
//
//  Created by Guido Novati (novatig@ethz.ch).
//

#ifndef smarties_MGULayer_h
#define smarties_MGULayer_h

#include "Layers.h"

namespace smarties
{

class MGULayer: public Layer
{
  // MGU (Minimal Gated Unit)   input(t) -> [GRU] -> output(t)
  //  forget(t) = sigmoid(Wfr output(t-1) + Wff input(t) + bf)
  //  state(t)  = tanh  (Wsr [forget(t) * output(t-1)] + Wsf input(t) + bs)
  //  output(t) = (1 - forget(t)) * output(t-1) + forget(t) * state(t)
  // Where * and + are element-wise ops, weight-vector multiplication is implied.
  //
  // Storage layout used throughout this class:
  //  - X(ID) holds 2*nCells pre/post activations: [forget | state]
  //  - W(ID) holds, for each of the nInputs feedforward rows and then the
  //    nCells recurrent rows, 2*nCells entries: [Wf row | Ws row]
  //  - B(ID) holds 2*nCells biases: [bf | bs]
  const Uint nInputs, nCells;             // input width / number of MGU cells
  const std::unique_ptr<Function> cell;   // activation chosen by funcType

 public:
  // Report parameter counts: one weight matrix of shape
  // (nInputs+nCells) x (2*nCells) and 2*nCells biases.
  void requiredParameters(std::vector<Uint>& nWeight,
                          std::vector<Uint>& nBiases ) const override
  {
    //cell, input, forget, output gates all linked to inp and prev LSTM output
    nWeight.push_back(2*nCells * (nInputs + nCells) );
    nBiases.push_back(2*nCells);
  }

  // Report per-step activation storage: 2*nCells (forget gate + cell state).
  void requiredActivation(std::vector<Uint>& sizes,
                          std::vector<Uint>& bOutputs,
                          std::vector<Uint>& bInputs) const override
  {
    sizes.push_back(2*nCells);
    bOutputs.push_back(bOutput);
    bInputs.push_back(bInput);
  }

  // Intentionally a no-op: MGU biases are set by initialize(), not by a
  // user-provided vector.
  void biasInitialValues(const std::vector<Real> init) override { }

  MGULayer(Uint _ID, Uint _nInputs, Uint _nCells, std::string funcType,
           bool bOut, Uint iLink) :
    Layer(_ID, _nCells, bOut, false, iLink), nInputs(_nInputs),
    nCells(_nCells), cell(makeFunction(funcType))
  {
    spanCompInpGrads = _nInputs;
    // SIMD loops below assume nCells is a multiple of the vector width:
    if(_nCells % ARY_WIDTH)
      die("hardcoded simd: pick size multiple of 8 for float and 4 for double");
  }

  // Human-readable one-line description of this layer.
  std::string printSpecs() const override
  {
    std::ostringstream o;
    o<<"("<<ID<<") "<<cell->name()
     <<std::string(bOutput? " output ":" ")
     <<"MGU Layer of size:"<<nCells
     <<" linked to Layer:"<<ID-link
     <<" of size:"<<nInputs<<"\n";
    return o.str();
  }

  // Forward pass for one time step.  `prev` is the activation of the
  // previous time step (nullptr at t=0), `curr` the current one.
  void forward( const Activation*const prev,
                const Activation*const curr,
                const Parameters*const para) const override
  {
    // linearOutput contains input to all cell inputs and gates
    // it then gets split into first nCell components and last nCell components
    nnReal* const forget = curr->X(ID);          // first nCell is forget gate
    nnReal* const state = curr->X(ID) + nCells;  // last nCell is cell state
    nnReal* const output = curr->Y(ID);          // MGU output
    // para->W(ID) contains [Wff Wsf Wfr Wsr] := [ weights feedforward forget,
    // w ff cellstate, w recurrent forget, w recur cellstate ]
    {
      nnReal* const linearOutput = curr->X(ID);  // both forget and cell state
      memcpy(linearOutput, para->B(ID), 2*nCells*sizeof(nnReal)); // add bias
      const nnReal* const inputs = curr->Y(ID-link);  // output of prev layer
      const nnReal* const weight = para->W(ID);  // weights for feedforward op
      // accumulate the feedforward contribution Wff/Wsf * input:
      for (Uint i = 0; i < nInputs; ++i) {
        const nnReal* const W = weight + (2*nCells)*i;
        #pragma omp simd aligned(linearOutput, inputs, W : VEC_WIDTH)
        for (Uint o = 0; o < 2*nCells; ++o)
          linearOutput[o] += inputs[i] * W[o];
      }
    }

    if(prev not_eq nullptr)  // if not at first time step
    {
      // forget = sigm [ Wfr prevOutput + Wff inputs + b ]
      const nnReal* const inputs = prev->Y(ID);
      // recurrent connection weights are shifted by (2*nCells)*nInputs:
      const nnReal* const weightRecur = para->W(ID) +(2*nCells)*nInputs;
      for (Uint i=0; i<nCells; ++i) {
        const nnReal* const Wfr = weightRecur + (2*nCells)*i;
        #pragma omp simd aligned(forget, inputs, Wfr : VEC_WIDTH)
        for(Uint o=0; o<nCells; ++o) forget[o] += Wfr[o] * inputs[i];
      }
      Sigm::_eval(forget, forget, nCells);

      // state = tanh [ Wsr (forget \elemProd prevOut) + Wsf inputs + b ]
      // note: forget[i] gates the recurrent input i, hence the extra factor
      for (Uint i=0; i<nCells; ++i) {
        const nnReal* const Wsr = weightRecur + (2*nCells)*i +nCells;
        #pragma omp simd aligned(state, forget, inputs, Wsr : VEC_WIDTH)
        for(Uint o=0; o<nCells; ++o)
          state[o] += Wsr[o] * inputs[i] * forget[i];
      }
      Tanh::_eval(state, state, nCells);

      // output = (1 - forget) \elemProd prevOut + forget \elemProd state
      #pragma omp simd aligned(output, forget, inputs, state : VEC_WIDTH)
      for (Uint o=0; o<nCells; ++o)
        output[o] = forget[o]*state[o] + (1-forget[o])*inputs[o];
    }
    else  // first time step: previous output is implicitly zero
    {
      Sigm::_eval(forget, forget, nCells);
      Tanh::_eval(state, state, nCells);
      #pragma omp simd aligned(output, forget, state : VEC_WIDTH)
      for (Uint o=0; o<nCells; ++o) output[o] = forget[o]*state[o];
    }
  }

  // Backward pass for one time step.  Reads dL/dOutput from curr->E(ID),
  // accumulates dL/dInput into the linked layer's error buffer, dL/dPrevOut
  // into prev->E(ID), and (if grad != nullptr) the parameter gradients.
  void backward( const Activation*const prev,
                 const Activation*const curr,
                 const Activation*const next,
                 const Parameters*const grad,
                 const Parameters*const para) const override
  {
    using Utilities::allocate_ptr;
    const nnReal* const forget = curr->X(ID);
    const nnReal* const state = curr->X(ID) + nCells;
    const nnReal* const dLdO = curr->E(ID); // dLossdGRU, comes from backprop
    nnReal* const dLdF = curr->E(ID) + nCells; // dLoss dForgetGate
    // curr->Y(ID) + nCells is unused memory: used here to store dLoss dState
    nnReal* const dLdS = curr->Y(ID) + nCells;
    // at t=0 there is no previous output: substitute a zero-filled buffer
    // (freed at the end of this function)
    nnReal* const prevOut = prev==nullptr? allocate_ptr(nCells) : prev->Y(ID);
    nnReal* const dLdprevOut = prev==nullptr? nullptr : prev->E(ID);
    // temp buffer for dLoss d(forget * previousInput) through state update
    nnReal* const dLdFprevOut = allocate_ptr(nCells);

    // 1) dLdS = forget * dLdO * tanh' (so it is actually dLoss d InputToTanh)
    #pragma omp simd aligned(dLdS, dLdO, forget, state : VEC_WIDTH)
    for (Uint o=0; o<nCells; ++o)
      dLdS[o] = dLdO[o] * forget[o] * (1-state[o]*state[o]);

    // 2) dLdFprevOut = Wsr * dLdS
    if(prev not_eq nullptr)
    {
      const nnReal*const Wsr = para->W(ID) + (2*nCells)*nInputs + nCells;
      #ifdef USE_OMPSIMD_BLAS
      GEMVomp(nCells, nCells, 2*nCells, Wsr, dLdS, dLdFprevOut);
      #else
      SMARTIES_gemv(CblasRowMajor, CblasNoTrans, nCells, nCells, 1, Wsr,
                    2*nCells, dLdS, 1, 0, dLdFprevOut, 1);
      #endif
    }

    // 3) dLdF = ((state - prevOut) * dLdO + dLdFprevOut * prevOut) * sigm'
    #pragma omp simd aligned(dLdF,prevOut,state,dLdO,forget,dLdFprevOut : VEC_WIDTH)
    for (Uint o=0; o<nCells; ++o)
      dLdF[o] = ((state[o]-prevOut[o])*dLdO[o] + dLdFprevOut[o]*prevOut[o])
                * forget[o] * (1-forget[o]);

    // 4) dLdprevOut = (1-forget)*dLdO + dLdFprevOut*forget + Wfr*dFdL
    if(prev not_eq nullptr)
    {
      #pragma omp simd aligned(dLdprevOut,forget,dLdO,dLdFprevOut : VEC_WIDTH)
      for(Uint o=0; o<nCells; ++o) // first two terms of 4) are elt-wise:
        dLdprevOut[o] += (1-forget[o])*dLdO[o] + forget[o]*dLdFprevOut[o];
      // last term of 4):
      const nnReal * const Wfr = para->W(ID) +(2*nCells)*nInputs;
      #ifdef USE_OMPSIMD_BLAS
      GEMVomp(nCells, nCells, 2*nCells, Wfr, dLdF, dLdprevOut);
      #else
      SMARTIES_gemv(CblasRowMajor, CblasNoTrans, nCells, nCells, 1, Wfr,
                    2*nCells, dLdF, 1, 1, dLdprevOut, 1);
      #endif
    }
    free(dLdFprevOut);

    // backprop dL to input  dLdI = Wff * dLdF + Wsf * dLdS
    if( spanCompInpGrads )
    {
      nnReal* const dLdInput = curr->E(ID-link) + startCompInpGrads;
      const nnReal* const Wff = para->W(ID) +startCompInpGrads*2*nCells;
      const nnReal* const Wsf = para->W(ID) +startCompInpGrads*2*nCells +nCells;
      #ifdef USE_OMPSIMD_BLAS
      GEMVomp(nCells, spanCompInpGrads, 2*nCells, Wff, dLdF, dLdInput);
      GEMVomp(nCells, spanCompInpGrads, 2*nCells, Wsf, dLdS, dLdInput);
      #else
      SMARTIES_gemv(CblasRowMajor, CblasNoTrans, spanCompInpGrads, nCells,
                    1, Wff, 2*nCells, dLdF, 1, 1, dLdInput, 1);
      SMARTIES_gemv(CblasRowMajor, CblasNoTrans, spanCompInpGrads, nCells,
                    1, Wsf, 2*nCells, dLdS, 1, 1, dLdInput, 1);
      #endif
    }
    if(prev==nullptr) { free(prevOut); } // release the zero substitute

    if(grad == nullptr) return; // then no need to compute grad w.r.t. params

    { // bias gradients: [dLdF | dLdS]
      nnReal* const grad_b = grad->B(ID);
      #pragma omp simd aligned(grad_b, dLdF, dLdS : VEC_WIDTH)
      for(Uint o=0; o<nCells; ++o) {
        grad_b[o] += dLdF[o];
        grad_b[o+nCells] += dLdS[o];
      }
    }

    { // feedforward weight gradients: outer product input x [dLdF | dLdS]
      const nnReal* const inputs = curr->Y(ID-link);
      for(Uint i=0; i<nInputs; ++i) {
        nnReal* const G = grad->W(ID) + (2*nCells)*i;
        #pragma omp simd aligned(G, inputs, dLdF, dLdS : VEC_WIDTH)
        for(Uint o=0; o<nCells; ++o) {
          G[o] += inputs[i] * dLdF[o];
          G[o+nCells] += inputs[i] * dLdS[o];
        }
      }
    }

    if(prev not_eq nullptr)
    { // recurrent weight gradients; Wsr rows see the gated input
      // forget[i]*prevOut[i], hence the extra forget[i] factor
      for(Uint i=0; i<nCells; ++i) {
        nnReal* const G = grad->W(ID) + 2*nCells * (nInputs + i);
        #pragma omp simd aligned(G, prevOut, dLdF, dLdS, forget : VEC_WIDTH)
        for(Uint o=0; o<nCells; ++o) {
          G[o] += prevOut[i] * dLdF[o];
          G[o+nCells] += prevOut[i] * dLdS[o] * forget[i];
        }
      }
    }
  }

  // Randomize weights uniformly in [-init, init]; biases prime the forget
  // gate (LSTM_PRIME_FAC) and zero the state pre-activation.
  void initialize(std::mt19937& G, const Parameters*const W,
                  Real initializationFac) const override
  {
    const nnReal fac = (initializationFac>0) ? initializationFac : 1;
    const nnReal init = fac * cell->initFactor(nInputs, nCells);
    std::uniform_real_distribution<nnReal> dis(-init, init);
    { // forget gate starts open, inp/out gates are closed
      nnReal* const BB = W->B(ID);
      for(Uint o=0*nCells; o<1*nCells; ++o) BB[o] = 0+LSTM_PRIME_FAC;
      for(Uint o=1*nCells; o<2*nCells; ++o) BB[o] = 0;
    }
    {
      nnReal* const weight = W->W(ID);
      for(Uint w=0; w<2*nCells*(nInputs+nCells); ++w) weight[w] = dis(G);
    }
  }

  // Serialize weights then biases into tmp (as float); returns the number
  // of values written: 2*nCells*(nInputs+nCells) weights + 2*nCells biases.
  size_t save(const Parameters * const para,
              float * tmp) const override
  {
    const nnReal* const bias = para->B(ID);
    const nnReal* const weight = para->W(ID);
    for (Uint n=0; n<2*nCells * (nInputs+nCells); ++n)
      *(tmp++) = (float) weight[n];
    for (Uint n=0; n<2*nCells; ++n)
      *(tmp++) = (float) bias[n];
    return 2*nCells * (nInputs+nCells + 1);
  }

  // Inverse of save(): read weights then biases from tmp; returns the
  // number of values consumed (must match save()).
  size_t restart(const Parameters * const para,
                 const float * tmp) const override
  {
    nnReal* const bias = para->B(ID);
    nnReal* const weight = para->W(ID);
    for (Uint n=0; n<2*nCells * (nInputs+nCells); ++n)
      weight[n] = (nnReal) *(tmp++);
    for (Uint n=0; n<2*nCells; ++n)
      bias[n] = (nnReal) *(tmp++);
    return 2*nCells * (nInputs+nCells + 1);
  }
};

} // end namespace smarties
#endif // smarties_MGULayer_h
GB_binop__bshift_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__bshift_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_08__bshift_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_02__bshift_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_04__bshift_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__bshift_uint64)
// A*D function (colscale):       GB ((none))
// D*A function (rowscale):       GB ((none))
// C+=B function (dense accum):   GB (_Cdense_accumB__bshift_uint64)
// C+=b function (dense accum):   GB (_Cdense_accumb__bshift_uint64)
// C+=A+B function (dense ewise3):    GB ((none))
// C=A+B function (dense ewise3):     GB (_Cdense_ewise3_noaccum__bshift_uint64)
// C=scalar+B     GB (_bind1st__bshift_uint64)
// C=scalar+B'    GB (_bind1st_tran__bshift_uint64)
// C=A+scalar     GB (_bind2nd__bshift_uint64)
// C=A'+scalar    GB (_bind2nd_tran__bshift_uint64)

// C type:   uint64_t
// A type:   uint64_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = GB_bitshift_uint64 (aij, bij)
// (shift of a uint64 value aij by a signed int8 count bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint64 (x, y) ;

// true if the binop must be flipped
// (BSHIFT is not commutative, and has no pre-flipped variant, so the
// flipxy case is handled explicitly in _AemultB_02 below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT64 || GxB_NO_BSHIFT_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BSHIFT does not qualify, so this kernel is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true:
    uint64_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap:
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint64 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap:
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint64 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                                   \
{                                                           \
    int8_t aij = GBX (Ax, pA, false) ;                      \
    Cx [pC] = GB_bitshift_uint64 (x, aij) ;                 \
}

GrB_Info GB (_bind1st_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // temporarily redefine GB_ATYPE to B's type, then restore it below:
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                                   \
{                                                           \
    uint64_t aij = GBX (Ax, pA, false) ;                    \
    Cx [pC] = GB_bitshift_uint64 (aij, y) ;                 \
}

GrB_Info GB (_bind2nd_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H

#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"

namespace clang {

//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//

/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
/// Clauses and child statements are stored as trailing objects after the
/// (derived) directive object itself: first NumClauses OMPClause pointers
/// (at ClausesOffset bytes from `this`), then NumChildren Stmt pointers.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// \brief Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// \brief Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the directive.
  SourceLocation EndLoc;
  /// \brief Numbers of clauses.
  const unsigned NumClauses;
  /// \brief Number of child expressions/stmts.
  const unsigned NumChildren;
  /// \brief Offset from this to the start of clauses.
  /// There are NumClauses pointers to clauses, they are followed by
  /// NumChildren pointers to child stmts/exprs (if the directive type
  /// requires an associated stmt, then it has to be the first of them).
  const unsigned ClausesOffset;

  /// \brief Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    // The clause array lives ClausesOffset bytes past the start of the
    // (most-derived) directive object; see the constructor below.
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }

protected:
  /// \brief Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  /// The unused leading \c const \c T* parameter exists only so that
  /// sizeof(T) of the most-derived class can be used to compute
  /// ClausesOffset (trailing storage starts after the derived object).
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
        NumChildren(NumChildren),
        ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {}

  /// \brief Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// \brief Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    // The associated statement is always the first trailing child.
    *child_begin() = S;
  }

public:
  /// \brief Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    // Advance the wrapped iterator until it points at a SpecificClause
    // (or reaches End).
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    auto Clauses = getClausesOfKind<SpecificClause>();

    if (Clauses.begin() != Clauses.end()) {
      assert(std::next(Clauses.begin()) == Clauses.end() &&
             "There are at least 2 clauses of the specified kind");
      return *Clauses.begin();
    }
    return nullptr;
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// \brief Returns starting location of directive kind.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns ending location of directive.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// \brief Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// \brief Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }

  /// \brief Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }

  /// \brief Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }

  /// \brief Returns statement associated with the directive.
  Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return const_cast<Stmt *>(*child_begin());
  }

  /// \brief Returns the captured statement associated with the
  /// component region within the (combined) directive.
  //
  // \param RegionKind Component region kind.
  CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(std::any_of(
               CaptureRegions.begin(), CaptureRegions.end(),
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    // Capture regions are nested CapturedStmts in the order listed in
    // CaptureRegions; peel one level per region until RegionKind is found.
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!hasAssociatedStmt())
      return child_range(child_iterator(), child_iterator());
    // Child stmts are stored immediately after the clause pointers.
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    return child_range(ChildStorage, ChildStorage + NumChildren);
  }

  ArrayRef<OMPClause *> clauses() { return getClauses(); }

  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }
};

/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if the construct has inner cancel directive.
  bool HasCancel;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};

/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Number of collapsed loops as specified by 'collapse' clause.
  unsigned CollapsedNum;

  /// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 9 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 11 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, three arrays of length CollapsedNum are
  /// allocated: loop counters, their updates and final values.
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    InitOffset = 6,
    IncOffset = 7,
    PreInitsOffset = 8,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals arrays).
    DefaultEnd = 9,
    // The following 8 exprs are used by worksharing and distribute loops only.
    IsLastIterVariableOffset = 9,
    LowerBoundVariableOffset = 10,
    UpperBoundVariableOffset = 11,
    StrideVariableOffset = 12,
    EnsureUpperBoundOffset = 13,
    NextLowerBoundOffset = 14,
    NextUpperBoundOffset = 15,
    NumIterationsOffset = 16,
    // Offset to the end for worksharing loop directives.
    WorksharingEnd = 17,
    PrevLowerBoundVariableOffset = 17,
    PrevUpperBoundVariableOffset = 18,
    DistIncOffset = 19,
    PrevEnsureUpperBoundOffset = 20,
    CombinedLowerBoundVariableOffset = 21,
    CombinedUpperBoundVariableOffset = 22,
    CombinedEnsureUpperBoundOffset = 23,
    CombinedInitOffset = 24,
    CombinedConditionOffset = 25,
    CombinedNextLowerBoundOffset = 26,
    CombinedNextUpperBoundOffset = 27,
    // Offset to the end (and start of the following counters/updates/finals
    // arrays) for combined distribute loop directives.
    CombinedDistributeEnd = 28,
  };

  /// \brief Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// \brief Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
        child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// \brief Get the counter inits storage.
  MutableArrayRef<Expr *> getInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// \brief Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// \brief Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

protected:
  /// \brief Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
/// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// \brief Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// \brief Children number. 
static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
                                                     // PrivateCounters, Inits,
                                                     // Updates and Finals
  }

  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  void setPreInits(Stmt *PreInits) {
    *std::next(child_begin(), PreInitsOffset) = PreInits;
  }
  // The following setters are valid only for worksharing, taskloop and
  // distribute loop directives (see the offset enum above).
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), IsLastIterVariableOffset) = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), LowerBoundVariableOffset) = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), UpperBoundVariableOffset) = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), StrideVariableOffset) = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextLowerBoundOffset) = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextUpperBoundOffset) = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NumIterationsOffset) = NI;
  }
  // The following setters are valid only for loop-bound-sharing (combined
  // distribute) directives.
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), DistIncOffset) = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedInitOffset) = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedConditionOffset) = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
  }
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);

public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g.
/// 'distribute parallel for')
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    /// with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    ///  with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    ///  with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
  };

  /// \brief The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// \brief Loop iteration variable.
    Expr *IterationVarRef;
    /// \brief Loop last iteration number.
    Expr *LastIteration;
    /// \brief Loop number of iterations.
    Expr *NumIterations;
    /// \brief Calculation of last iteration.
    Expr *CalcLastIteration;
    /// \brief Loop pre-condition.
    Expr *PreCond;
    /// \brief Loop condition.
    Expr *Cond;
    /// \brief Loop iteration variable init.
    Expr *Init;
    /// \brief Loop increment.
    Expr *Inc;
    /// \brief IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// \brief LowerBound - local variable passed to runtime.
    Expr *LB;
    /// \brief UpperBound - local variable passed to runtime.
    Expr *UB;
    /// \brief Stride - local variable passed to runtime.
    Expr *ST;
    /// \brief EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// \brief Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// \brief Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// \brief PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// \brief PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// \brief DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// \brief PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// \brief Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// \brief PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// \brief Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// \brief Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// \brief Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// Init statement for all captured expressions.
    Stmt *PreInits;

    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;

    /// \brief Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// \brief Initialize all the fields to null.
    /// \param Size Number of elements in the counters/finals/updates arrays.
void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      for (unsigned i = 0; i < Size; ++i) {
        Counters[i] = nullptr;
        PrivateCounters[i] = nullptr;
        Inits[i] = nullptr;
        Updates[i] = nullptr;
        Finals[i] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
    }
  };

  /// \brief Get number of collapsed loops.
  unsigned getCollapsedNumber() const { return CollapsedNum; }

  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  Expr *getCond() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
  }
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  const Stmt *getPreInits() const {
    return *std::next(child_begin(), PreInitsOffset);
  }
  Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
  // The following getters are valid only for worksharing, taskloop and
  // distribute loop directives (see the offset enum above).
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IsLastIterVariableOffset)));
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LowerBoundVariableOffset)));
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), UpperBoundVariableOffset)));
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), StrideVariableOffset)));
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), EnsureUpperBoundOffset)));
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextLowerBoundOffset)));
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextUpperBoundOffset)));
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NumIterationsOffset)));
  }
  // The following getters are valid only for loop-bound-sharing (combined
  // distribute) directives.
  Expr *getPrevLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevLowerBoundVariableOffset)));
  }
  Expr *getPrevUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevUpperBoundVariableOffset)));
  }
  Expr *getDistInc() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), DistIncOffset)));
  }
  Expr *getPrevEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
  }
  Expr *getCombinedLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
  }
  Expr *getCombinedUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
  }
  Expr *getCombinedEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
  }
  Expr *getCombinedInit() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedInitOffset)));
  }
  Expr *getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedConditionOffset)));
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextLowerBoundOffset)));
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextUpperBoundOffset)));
  }
  const Stmt *getBody() const {
    // This relies on the loop form is already checked by Sema.
    Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
    Body = cast<ForStmt>(Body)->getBody();
    for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
      Body = Body->IgnoreContainers();
      Body = cast<ForStmt>(Body)->getBody();
    }
    return Body;
  }

  ArrayRef<Expr *> counters() { return getCounters(); }

  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }

  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }

  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }

  ArrayRef<Expr *> inits() { return getInits(); }

  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }

  ArrayRef<Expr *> updates() { return getUpdates(); }

  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }

  ArrayRef<Expr *> finals() { return getFinals(); }

  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() ==
               OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};

/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
/// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// \brief This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief true if current directive has inner cancel directive. bool HasCancel; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// \brief Set cancel state. 
void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt, const HelperExprs &Exprs,
                                 bool HasCancel);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};

/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// \brief This represents '#pragma omp sections' directive. 
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};

/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1),
        HasCancel(false) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1),
        HasCancel(false) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};

/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
/// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// \brief This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, StartLoc, EndLoc, 0, 1) {} /// \brief Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// \brief This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Name of the directive. DeclarationNameInfo DirName; /// \brief Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// \brief Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// \brief Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// \brief Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// \brief This represents '#pragma omp parallel for' directive. 
/// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief true if current region has inner cancel directive. bool HasCancel; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// \brief Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// \brief Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// \brief This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// \brief This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief true if current directive has inner cancel directive. bool HasCancel; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// \brief Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// \brief Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// \brief This represents '#pragma omp task' directive. 
/// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief true if this directive has inner cancel directive. bool HasCancel; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// \brief Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. 
/// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// \brief Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// \brief This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// \brief Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// \brief This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// \brief Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// \brief This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// \brief Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Creates an empty directive. 
/// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// \param NumClauses Number of clauses. /// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// \brief This represents '#pragma omp flush' directive. 
/// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// \brief This represents '#pragma omp ordered' directive. 
/// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPOrderedDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// \brief This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Used for 'atomic update' or 'atomic capture' constructs. 
/// They may have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and
  /// false otherwise.
  bool IsPostfixUpdate;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// \brief Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// \brief Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// \brief Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// \brief Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and
  /// 'expr' parts of the atomic construct (see Section 2.12.6, atomic
  /// Construct, for detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// \brief Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// \brief Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// \brief Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// \brief Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// \brief Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// \brief Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};

/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
/// explicit OMPTargetDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// \brief This represents '#pragma omp target data' directive. /// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, OMPD_target_data, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, OMPD_target_data, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// \brief This represents '#pragma omp target enter data' directive. /// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, OMPD_target_enter_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/0) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetEnterDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, OMPD_target_enter_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/0) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPTargetEnterDataDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// \brief Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// \brief This represents '#pragma omp target exit data' directive. /// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, OMPD_target_exit_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/0) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetExitDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, OMPD_target_exit_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/0) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPTargetExitDataDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// \brief Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// \brief This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. /// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// \brief This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief true if current region has inner cancel directive. bool HasCancel; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};

/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};

/// \brief This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Region kind ('parallel', 'for', ...) this cancellation point applies to;
  // OMPD_unknown until setCancelRegion is called.
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  // Takes no clauses and no children (the trailing 0, 0).
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc, 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};

/// \brief This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Region kind this cancel applies to; OMPD_unknown until set.
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses, 0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};

/// \brief This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};

/// \brief This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, SourceLocation(), SourceLocation(),
                         CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};

/// \brief This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
/// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// \brief This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. 
/// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, StartLoc, EndLoc, NumClauses, 0) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPTargetUpdateDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// \brief This represents '#pragma omp distribute parallel for' composite /// directive. 
/// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp distribute parallel for simd' has /// clause 'private' with the variables 'x' /// class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. 
/// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, SourceLocation(),SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. /// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. 
/// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. 
/// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. 
/// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
test.c
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

// enable tests
#define CHECK 1
#define DEBUG 0

#define N (992)
#define INIT() INIT_LOOP(N, {A[i] = 0; C[i] = 1; D[i] = i; E[i] = -i;})

// Conformance test for the OpenMP device memory routines:
//   omp_set/get_default_device, omp_target_alloc/free, omp_target_memcpy,
//   omp_target_is_present, omp_target_associate_ptr/disassociate_ptr.
// The host pointers (pA/pC/pD) and device pointers (dpA/dpC/dpD) are
// deliberately biased by fixed element counts so that the offset arguments of
// omp_target_memcpy / omp_target_associate_ptr must cancel the bias exactly
// for the test to produce correct data.
int main(void){
#if CHECK
  check_offloading();
#endif

  /*
   * Default device
   */
  printf("Is%s initial device\n", omp_is_initial_device() ? "" : " not");
  printf("Initial device: %d\n", omp_get_initial_device());
  omp_set_default_device(1);
  printf("Default device before task: %d\n", omp_get_default_device());
  // The default-device ICV is per data environment: the task inherits the
  // value 1, resets it to 2 inside, and the change must not leak back out.
  #pragma omp task
  {
    printf("Default device inside task: %d\n", omp_get_default_device());
    omp_set_default_device(2);
    printf("Default device inside task after resetting: %d\n", omp_get_default_device());
  }
  printf("Default device outside task: %d\n", omp_get_default_device());

  // default device can set to whatever, if target fails, it goes to the host
  const int default_device = 0;
  omp_set_default_device(default_device);

  // default device for omp target call MUST be >= 0 and <omp_get_num_devices() or
  // the initial device. So when there are no devices, it must be the initial device
  int default_device_omp_target_call = default_device;
  if (omp_get_num_devices() == 0) {
    default_device_omp_target_call = omp_get_initial_device();
  }

#if DEBUG
  printf("test on machine with %d devices\n", omp_get_num_devices());
#endif

  /*
   * Target alloc & target memcpy
   */
  double A[N], B[N], C[N], D[N], E[N];
  double *pA, *pB, *pC, *pD, *pE;

  // map ptrs
  pA = &A[0];
  pB = &B[0];
  pC = &C[0];
  pD = &D[0];
  pE = &E[0];

  INIT();

  // Bias the host pointers by 10/20/30 elements; the memcpy src offsets
  // below (10/20/30 * sizeof(double)) undo this bias.
  pA = pA - 10;
  pC = pC - 20;
  pD = pD - 30;

  void *device_A = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
  void *device_C = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
  void *device_D = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
  // Same trick on the device side: bias by 100/200/300 elements, undone by
  // the dst offsets and by the i+100 / i+200 / i+300 indexing in the kernel.
  double *dpA = (double *) device_A - 100;
  double *dpC = (double *) device_C - 200;
  double *dpD = (double *) device_D - 300;

  printf("omp_target_alloc %s\n", device_A && device_C && device_D ? "succeeded" : "failed");

  // Host -> device: dst offset 200/300 elems cancels dpC/dpD bias, src
  // offset 20/30 elems cancels pC/pD bias, so C and D are copied intact.
  omp_target_memcpy(dpC, pC, N*sizeof(double), 200*sizeof(double), 20*sizeof(double), default_device_omp_target_call, omp_get_initial_device());
  omp_target_memcpy(dpD, pD, N*sizeof(double), 300*sizeof(double), 30*sizeof(double), default_device_omp_target_call, omp_get_initial_device());

  // Compute on raw device pointers; is_device_ptr suppresses implicit maps.
  #pragma omp target is_device_ptr(dpA, dpC, dpD) device(default_device)
  {
    #pragma omp parallel for schedule(static,1)
    for (int i = 0; i < 992; i++)
      dpA[i+100] = dpC[i+200] + dpD[i+300] + 1;
  }

  // Device -> host copy of the result into A (offsets cancel biases again).
  omp_target_memcpy(pA, dpA, N*sizeof(double), 10*sizeof(double), 100*sizeof(double), omp_get_initial_device(), default_device_omp_target_call);

  // Expected: A[i] = C[i] + D[i] + 1 = 1 + i + 1 = i + 2.
  // NOTE(review): VERIFY presumably sets 'fail' on mismatch -- defined in
  // ../utilities, confirm there.
  int fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test omp_target_memcpy: Failed\n");
  } else {
    printf ("Test omp_target_memcpy: Succeeded\n");
  }

  /*
   * target_is_present and target_associate/disassociate_ptr
   */
  INIT();

  if (offloading_disabled()) {
    // If offloading is disabled just recreate the messages so that this can
    // also be tested with no device.
    printf("C is not present, associating it...\n");
    printf("omp_target_associate_ptr C %s\n", 1 ? "succeeded" : "failed");
  } else if (!omp_target_is_present(C, default_device_omp_target_call)) {
    // Associate host array C with the (biased) device buffer; the device
    // offset 200 elems cancels the dpC bias, mapping C to device_C exactly.
    printf("C is not present, associating it...\n");
    int rc = omp_target_associate_ptr(C, dpC, N*sizeof(double), 200*sizeof(double), default_device_omp_target_call);
    printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }
  if (offloading_disabled()) {
    // If offloading is disabled just recreate the messages so that this can
    // also be tested with no device.
    printf("D is not present, associating it...\n");
    printf("omp_target_associate_ptr D %s\n", 1 ? "succeeded" : "failed");
  } else if (!omp_target_is_present(D, default_device_omp_target_call)) {
    printf("D is not present, associating it...\n");
    int rc = omp_target_associate_ptr(D, dpD, N*sizeof(double), 300*sizeof(double), default_device_omp_target_call);
    printf("omp_target_associate_ptr D %s\n", !rc ? "succeeded" : "failed");
  }

  #pragma omp target data map(from: C, D) device(default_device)
  {
    printf("Inside target data: A is%s present\n", (omp_target_is_present(A, default_device_omp_target_call) && !offloading_disabled()) ? "" : " not");
    printf("Inside target data: C is%s present\n", omp_target_is_present(C, default_device_omp_target_call) ? "" : " not");
    printf("Inside target data: D is%s present\n", omp_target_is_present(D, default_device_omp_target_call) ? "" : " not");

    // C and D are mapped "from", so there is no copy from host to device.
    // If the association was successful, their corresponding device arrays
    // are already populated from previous omp_target_memcpy with the correct
    // values and the following target for-loop must yield the correct results.
    #pragma omp target map(from: A) device(default_device)
    {
      #pragma omp parallel for schedule(static,1)
      for (int i = 0; i < 992; i++)
        A[i] = C[i] + D[i] + 1;
    }
  }

  if (offloading_disabled()) {
    printf("C is present, disassociating it...\n");
    printf("omp_target_disassociate_ptr C %s\n", 1 ? "succeeded" : "failed");
  } else if (omp_target_is_present(C, default_device_omp_target_call)) {
    printf("C is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(C, default_device_omp_target_call);
    printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
  }
  if (offloading_disabled()) {
    printf("D is present, disassociating it...\n");
    printf("omp_target_disassociate_ptr D %s\n", 1 ? "succeeded" : "failed");
  } else if (omp_target_is_present(D, default_device_omp_target_call)) {
    printf("D is present, disassociating it...\n");
    int rc = omp_target_disassociate_ptr(D, default_device_omp_target_call);
    printf("omp_target_disassociate_ptr D %s\n", !rc ? "succeeded" : "failed");
  }

  // Same expected values as the first phase: A[i] = 1 + i + 1 = i + 2.
  fail = 0;
  VERIFY(0, N, A[i], (double)(i+2));
  if (fail) {
    printf ("Test omp_target_associate_ptr: Failed\n");
  } else {
    printf ("Test omp_target_associate_ptr: Succeeded\n");
  }

  // Free with the original (unbiased) allocation pointers.
  omp_target_free(device_A, default_device_omp_target_call);
  omp_target_free(device_C, default_device_omp_target_call);
  omp_target_free(device_D, default_device_omp_target_call);

  return 0;
}
pbkdf2-hmac-sha512_fmt_plug.c
/* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Based on hmac-sha512 by magnum
 *
 * Minor fixes, format unification and OMP support done by Dhiru Kholia
 * <dhiru@openwall.com>
 *
 * Fixed for supporting $ml$ "dave" format as well as GRUB native format by
 * magnum 2013. Note: We support a binary size of >512 bits (64 bytes / 128
 * chars of hex) but we currently do not calculate it even in cmp_exact(). The
 * chance for a 512-bit hash collision should be pretty dang slim.
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha512);
#else

#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>

#include "misc.h"
#include "arch.h"
#include "common.h"
#include "formats.h"
#include "sha2.h"
#include "johnswap.h"
#include "pbkdf2_hmac_common.h"
#include "pbkdf2_hmac_sha512.h"

#define FORMAT_LABEL            "PBKDF2-HMAC-SHA512"
#undef FORMAT_NAME
#define FORMAT_NAME             "GRUB2 / OS X 10.8+"

#ifdef SIMD_COEF_64
#define ALGORITHM_NAME          "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME          "PBKDF2-SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME          "PBKDF2-SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif

#define SALT_SIZE               sizeof(struct custom_salt)

/* With SIMD, keys are processed in SIMD-lane-sized groups. */
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               1
#endif
#endif

#include "memdbg.h"

#define PAD_SIZE                128
#define PLAINTEXT_LENGTH        125

/* Parsed salt: raw salt bytes plus the PBKDF2 iteration count. */
static struct custom_salt {
	uint8_t length;                          /* number of valid bytes in salt[] */
	uint8_t salt[PBKDF2_64_MAX_SALT_SIZE];
	uint32_t rounds;                         /* PBKDF2 iteration count */
} *cur_salt;

/* Per-candidate buffers, allocated in init() based on max_keys_per_crypt. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_SHA512_BINARY_SIZE / sizeof(uint32_t)];

/*
 * Allocate the key/result arrays.  Under OpenMP the keys-per-crypt counts
 * are scaled by the thread count (note: min is scaled before OMP_SCALE is
 * applied, max after — this matches the pattern used by other JtR formats).
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
	                       self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
	                       self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/*
 * Parse "<tag><rounds><delim><hex salt><delim>..." into a custom_salt.
 * The field delimiter is '.' if present anywhere after the tag, else '$'
 * (the $ml$ variant uses '$', other variants use '.').  The salt is
 * hex-decoded.  Returns a pointer to a static buffer (JtR convention).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;
	char delim;

	memset(&cs, 0, sizeof(cs));
	ciphertext += PBKDF2_SHA512_TAG_LEN;
	cs.rounds = atou(ciphertext);
	delim = strchr(ciphertext, '.') ? '.' : '$';
	ciphertext = strchr(ciphertext, delim) + 1;
	p = strchr(ciphertext, delim);
	saltlen = 0;
	while (ciphertext < p) {        /** extract salt **/
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;
	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Hash-table bucket accessors over the first 32 bits of each result. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/*
 * Derive PBKDF2-HMAC-SHA512 for all queued candidates.  With SIMD support
 * compiled in, candidates are fed to the SSE kernel in groups of
 * SSE_GROUP_SZ_SHA512; otherwise one at a time.  OpenMP splits the index
 * range across threads (each iteration touches disjoint slots).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef SSE_GROUP_SZ_SHA512
		int lens[SSE_GROUP_SZ_SHA512], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA512];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA512];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_sha512_sse((const unsigned char **)pin, lens,
		                  cur_salt->salt, cur_salt->length,
		                  cur_salt->rounds, &(x.poutc),
		                  PBKDF2_SHA512_BINARY_SIZE, 0);
#else
		pbkdf2_sha512((const unsigned char*)(saved_key[index]),
		              strlen(saved_key[index]),
		              cur_salt->salt, cur_salt->length,
		              cur_salt->rounds,
		              (unsigned char*)crypt_out[index],
		              PBKDF2_SHA512_BINARY_SIZE, 0);
#endif
	}
	return count;
}

/*
 * Quick reject: compare only the first ARCH_SIZE bytes of each result
 * (presumably sizeof(ARCH_WORD) — the full compare happens in cmp_one).
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_SHA512_BINARY_SIZE);
}

/* Store a candidate, truncated to PLAINTEXT_LENGTH and NUL-terminated. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Full recomputation for hashes wider than the stored binary. */
static int cmp_exact(char *source, int index)
{
	return pbkdf2_hmac_sha512_cmp_exact(get_key(index), source,
	                                    cur_salt->salt, cur_salt->length,
	                                    cur_salt->rounds);
}

/* Tunable-cost reporting: the PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->rounds;
}

struct fmt_main fmt_pbkdf2_hmac_sha512 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_SHA512_BINARY_SIZE,
		sizeof(uint32_t),
		SALT_SIZE,
		sizeof(ARCH_WORD),
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
		{
			"iteration count",
		},
		{ PBKDF2_SHA512_FORMAT_TAG, FORMAT_TAG_ML, FORMAT_TAG_GRUB },
		pbkdf2_hmac_sha512_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		pbkdf2_hmac_sha512_prepare,
		pbkdf2_hmac_sha512_valid,
		pbkdf2_hmac_sha512_split,
		pbkdf2_hmac_sha512_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
bli_dotv_opt_var1.c
/*

   BLIS
   An object-based framework for developing high-performance BLAS-like
   libraries.

   Copyright (C) 2014, The University of Texas

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
    - Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    - Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    - Neither the name of The University of Texas nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/

#include "blis.h"

#define FUNCPTR_T dotv_fp

/* Signature of a type-specialized dotv variant. */
typedef void (*FUNCPTR_T)(
                           conj_t conjx,
                           conj_t conjy,
                           dim_t  n,
                           void*  x, inc_t incx,
                           void*  y, inc_t incy,
                           void*  rho
                         );

// If some mixed datatype functions will not be compiled, we initialize
// the corresponding elements of the function array to NULL.
#ifdef BLIS_ENABLE_MIXED_PRECISION_SUPPORT
static FUNCPTR_T GENARRAY3_ALL(ftypes,dotv_opt_var1);
#else
#ifdef BLIS_ENABLE_MIXED_DOMAIN_SUPPORT
static FUNCPTR_T GENARRAY3_EXT(ftypes,dotv_opt_var1);
#else
static FUNCPTR_T GENARRAY3_MIN(ftypes,dotv_opt_var1);
#endif
#endif


/*
 * Object-based entry point: unpack the objects and dispatch to the
 * type-specialized implementation selected by the (x, y, rho) datatypes.
 */
void bli_dotv_opt_var1(
                        obj_t* x,
                        obj_t* y,
                        obj_t* rho
                      )
{
	num_t     dt_x      = bli_obj_datatype( *x );
	num_t     dt_y      = bli_obj_datatype( *y );
	num_t     dt_rho    = bli_obj_datatype( *rho );

	conj_t    conjx     = bli_obj_conj_status( *x );
	conj_t    conjy     = bli_obj_conj_status( *y );

	dim_t     n         = bli_obj_vector_dim( *x );

	inc_t     inc_x     = bli_obj_vector_inc( *x );
	void*     buf_x     = bli_obj_buffer_at_off( *x );

	inc_t     inc_y     = bli_obj_vector_inc( *y );
	void*     buf_y     = bli_obj_buffer_at_off( *y );

	void*     buf_rho   = bli_obj_buffer_at_off( *rho );

	FUNCPTR_T f;

	// Index into the type combination array to extract the correct
	// function pointer.
	f = ftypes[dt_x][dt_y][dt_rho];

	// Invoke the function.
	f( conjx,
	   conjy,
	   n,
	   buf_x, inc_x,
	   buf_y, inc_y,
	   buf_rho );
}


/* Generic (reference-style) dotv template, instantiated per type triple.
   Computes rho := conjx(x)^T conjy(y). */
#undef  GENTFUNC3
#define GENTFUNC3( ctype_x, ctype_y, ctype_r, chx, chy, chr, opname, varname ) \
\
void PASTEMAC3(chx,chy,chr,varname)( \
                                     conj_t conjx, \
                                     conj_t conjy, \
                                     dim_t  n, \
                                     void*  x, inc_t incx, \
                                     void*  y, inc_t incy, \
                                     void*  rho \
                                   ) \
{ \
	ctype_x* x_cast   = x; \
	ctype_y* y_cast   = y; \
	ctype_r* rho_cast = rho; \
	ctype_x* chi1; \
	ctype_y* psi1; \
	ctype_r  dotxy; \
	dim_t    i; \
	conj_t   conjx_use; \
\
	/* An empty dot product is defined to be zero. */ \
	if ( bli_zero_dim1( n ) ) \
	{ \
		PASTEMAC(chr,set0s)( *rho_cast ); \
		return; \
	} \
\
	PASTEMAC(chr,set0s)( dotxy ); \
\
	chi1 = x_cast; \
	psi1 = y_cast; \
\
	conjx_use = conjx; \
\
	/* If y must be conjugated, we do so indirectly by first toggling the \
	   effective conjugation of x and then conjugating the resulting dot \
	   product. */ \
	if ( bli_is_conj( conjy ) ) \
		bli_toggle_conj( conjx_use ); \
\
	if ( bli_is_conj( conjx_use ) ) \
	{ \
		for ( i = 0; i < n; ++i ) \
		{ \
			PASTEMAC3(chx,chy,chr,dotjs)( *chi1, *psi1, dotxy ); \
\
			chi1 += incx; \
			psi1 += incy; \
		} \
	} \
	else \
	{ \
		for ( i = 0; i < n; ++i ) \
		{ \
			PASTEMAC3(chx,chy,chr,dots)( *chi1, *psi1, dotxy ); \
\
			chi1 += incx; \
			psi1 += incy; \
		} \
	} \
\
	if ( bli_is_conj( conjy ) ) \
		PASTEMAC(chr,conjs)( dotxy ); \
\
	PASTEMAC2(chr,chr,copys)( dotxy, *rho_cast ); \
}


/*
 * Hand-optimized double-precision real dotv using QPX 4-wide vector
 * intrinsics (vec_lda/vec_madd), parallelized with OpenMP.  Falls back to
 * the unblocked reference variant for strided or unaligned inputs.
 */
void bli_ddddotv_opt_var1(
                           conj_t conjx,
                           conj_t conjy,
                           dim_t  n,
                           void*  x_in, inc_t incx,
                           void*  y_in, inc_t incy,
                           void*  rho_in
                         )
{
	double* restrict x   = x_in;
	double* restrict y   = y_in;
	double*          rho = rho_in;

	bool_t use_ref = FALSE;

	// If the vector lengths are zero, set rho to zero and return.
	if ( bli_zero_dim1( n ) )
	{
		// BUGFIX: the original applied set0s to the pointer 'rho' itself;
		// it must zero the pointed-to scalar, as the generic template
		// does with *rho_cast.
		PASTEMAC(d,set0s)( *rho );
		return;
	}

	// If there is anything that would interfere with our use of aligned
	// vector loads/stores, call the reference implementation.
	if ( incx != 1 || incy != 1 ||
	     bli_is_unaligned_to( x, 32 ) ||
	     bli_is_unaligned_to( y, 32 ) )
		use_ref = TRUE;

	// Call the reference implementation if needed.
	if ( use_ref )
	{
		bli_ddddotv_unb_var1( conjx, conjy, n, x, incx, y, incy, rho );
		return;
	}

	dim_t n_run  = n / 4;   // full 4-wide vector iterations
	dim_t n_left = n % 4;   // scalar leftover elements

	double rhos = 0.0;

	#pragma omp parallel reduction(+:rhos)
	{
		dim_t n_threads;
		dim_t t_id = omp_get_thread_num();
		n_threads  = omp_get_num_threads();

		vector4double rhov = vec_splats( 0.0 );
		vector4double xv, yv;

		// Round-robin the vector iterations across threads.
		for ( dim_t i = t_id; i < n_run; i += n_threads )
		{
			xv   = vec_lda( 0 * sizeof(double), &x[i*4] );
			yv   = vec_lda( 0 * sizeof(double), &y[i*4] );
			rhov = vec_madd( xv, yv, rhov );
		}

		rhos += vec_extract( rhov, 0 );
		rhos += vec_extract( rhov, 1 );
		rhos += vec_extract( rhov, 2 );
		rhos += vec_extract( rhov, 3 );
	}

	// Accumulate the scalar leftover elements.
	// BUGFIX: the original loop started at i = n_left and therefore never
	// executed, silently dropping the last n % 4 products.
	for ( dim_t i = 0; i < n_left; i++ )
	{
		rhos += x[4*n_run + i] * y[4*n_run + i];
	}

	*rho = rhos;
}


// Define the basic set of functions unconditionally, and then also some
// mixed datatype functions if requested.
//INSERT_GENTFUNC3_BASIC( dotv, dotv_opt_var1 )
GENTFUNC3( float,    float,    float,    s, s, s, dotv, dotv_opt_var1 )
//GENTFUNC3( double,   double,   double,   d, d, d, dotv, dotv_opt_var1 )
GENTFUNC3( scomplex, scomplex, scomplex, c, c, c, dotv, dotv_opt_var1 )
GENTFUNC3( dcomplex, dcomplex, dcomplex, z, z, z, dotv, dotv_opt_var1 )

#ifdef BLIS_ENABLE_MIXED_DOMAIN_SUPPORT
INSERT_GENTFUNC3_MIX_D( dotv, dotv_opt_var1 )
#endif

#ifdef BLIS_ENABLE_MIXED_PRECISION_SUPPORT
INSERT_GENTFUNC3_MIX_P( dotv, dotv_opt_var1 )
#endif
aix_ssha_fmt_plug.c
/* AIX ssha cracker patch for JtR. Hacked together during April of 2013 by Dhiru * Kholia <dhiru at openwall.com> and magnum. * * Thanks to atom (of hashcat project) and philsmd for discovering and * publishing the details of various AIX hashing algorithms. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * magnum, and * it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_aixssha1; extern struct fmt_main fmt_aixssha256; extern struct fmt_main fmt_aixssha512; #elif FMT_REGISTERS_H john_register_one(&fmt_aixssha1); john_register_one(&fmt_aixssha256); john_register_one(&fmt_aixssha512); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // Tuned on i7 w/HT for SHA-256 #endif #endif #include "pbkdf2_hmac_sha1.h" #include "pbkdf2_hmac_sha256.h" #include "pbkdf2_hmac_sha512.h" #include "memdbg.h" #define FORMAT_LABEL_SHA1 "aix-ssha1" #define FORMAT_LABEL_SHA256 "aix-ssha256" #define FORMAT_LABEL_SHA512 "aix-ssha512" #define FORMAT_NAME_SHA1 "AIX LPA {ssha1}" #define FORMAT_NAME_SHA256 "AIX LPA {ssha256}" #define FORMAT_NAME_SHA512 "AIX LPA {ssha512}" #define FORMAT_TAG1 "{ssha1}" #define FORMAT_TAG256 "{ssha256}" #define FORMAT_TAG512 "{ssha512}" #define FORMAT_TAG1_LEN (sizeof(FORMAT_TAG1)-1) #define FORMAT_TAG256_LEN (sizeof(FORMAT_TAG256)-1) #define FORMAT_TAG512_LEN (sizeof(FORMAT_TAG512)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME_SHA1 "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME_SHA1 "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #ifdef SIMD_COEF_32 #define ALGORITHM_NAME_SHA256 "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #else #define 
ALGORITHM_NAME_SHA256 "PBKDF2-SHA256 32/" ARCH_BITS_STR #endif #ifdef SIMD_COEF_64 #define ALGORITHM_NAME_SHA512 "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME_SHA512 "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 /* actual max in AIX is 255 */ #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define CMP_SIZE BINARY_SIZE - 2 #define LARGEST_BINARY_SIZE 64 #define MAX_SALT_SIZE 24 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests aixssha_tests1[] = { {"{ssha1}06$T6numGi8BRLzTYnF$AdXq1t6baevg9/cu5QBBk8Xg.se", "whatdoyouwantfornothing$$$$$$"}, {"{ssha1}06$6cZ2YrFYwVQPAVNb$1agAljwERjlin9RxFxzKl.E0.sJ", "gentoo=>meh"}, /* Full 125 byte PW (longest JtR will handle). generated by pass_gen.pl */ {"{ssha1}06$uOYCzfO5dt0EdnwG$CK81ljQknzEAcfwg97cocEwz.gv", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static struct fmt_tests aixssha_tests256[] = { {"{ssha256}06$YPhynOx/iJQaJOeV$EXQbOSYZftEo3k01uoanAbA7jEKZRUU9LCCs/tyU.wG", "verylongbutnotverystrongpassword"}, {"{ssha256}06$5lsi4pETf/0p/12k$xACBftDMh30RqgrM5Sppl.Txgho41u0oPoD21E1b.QT", "I<3JtR"}, /* Full 125 byte PW (longest JtR will handle). 
generated by pass_gen.pl */ {"{ssha256}06$qcXPTOQzDAqZuiHc$pS/1HC4uI5jIERNerX8.Zd0G/gDepZuqR7S5WHEn.AW", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static struct fmt_tests aixssha_tests512[] = { {"{ssha512}06$y2/.O4drNJd3ecgJ$DhNk3sS28lkIo7XZaXWSkFOIdP2Zsd9DIKdYDSuSU5tsnl29Q7xTc3f64eAGMpcMJCVp/SXZ4Xgx3jlHVIOr..", "solarisalwaysbusyitseems"}, {"{ssha512}06$Dz/dDr1qa8JJm0UB$DFNu2y8US18fW37ht8WRiwhSeOqAMJTJ6mLDW03D/SeQpdI50GJMYb1fBog5/ZU3oM9qsSr9w6u22.OjjufV..", "idontbelievethatyourpasswordislongerthanthisone"}, /* hash posted on john-users */ {"{ssha512}06$................$0egLaF88SUk6GAFIMN/vTwa/IYB.KlubYmjiaWvmQ975vHvgC3rf0I6ZYzgyUiQftS8qs7ULLQpRLrA3LA....", "44"}, {"{ssha512}06$aXayEJGxA02Bl4d2$TWfWx34oD.UjrS/Qtco6Ij2XPY1CPYJfdk3CcxEjnMZvQw2p5obHYH7SI2wxcJgaS9.S9Hz948R.GdGwsvR...", "test"}, /* http://www.ibmsystemsmag.com/aix/administrator/security/password_hash/?page=2 <== partially corrupted hash? */ {"{ssha512}06$otYx2eSXx.OkEY4F$No5ZvSfhYuB1MSkBhhcKJIjS0.q//awdkcZwF9/TXi3EnL6QeronmS0jCc3P2aEV9WLi5arzN1YjVwkx8bng..", "colorado"}, /* Full 125 byte PW (longest JtR will handle). 
generated by pass_gen.pl */ {"{ssha512}06$w6THk2lJbkqW0rXv$yH6VWp3X9ad2l8nhYi22lrrmWskXvEU.PONbWUAZHrjhgQjdU83jtRnYmpRZIJzTVC3RFcoqpbtd63n/UdlS..", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int iterations; int type; unsigned char salt[MAX_SALT_SIZE + 1]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static inline int valid_common(char *ciphertext, struct fmt_main *self, int b64len, char *sig, int siglen) { char *p = ciphertext; int len; if (!strncmp(p, sig, siglen)) p += siglen; else return 0; len = strspn(p, DIGITCHARS); /* iterations, exactly two digits */ if (len != 2 || atoi(p) > 31) /* actual range is 4..31 */ return 0; p += 2; if (*p++ != '$') return 0; len = strspn(p, BASE64_CRYPT); /* salt, 8..24 base64 chars */ if (len < 8 || len > MAX_SALT_SIZE) return 0; p += len; if (*p++ != '$') return 0; len = strspn(p, BASE64_CRYPT); /* hash */ if (len != b64len) return 0; if (p[len] != 0) /* nothing more allowed */ return 0; return 1; } static int valid_sha1(char *ciphertext, struct fmt_main *self) { return valid_common(ciphertext, self, 27, FORMAT_TAG1, FORMAT_TAG1_LEN); } static int valid_sha256(char *ciphertext, struct fmt_main *self) { return valid_common(ciphertext, self, 43, FORMAT_TAG256, FORMAT_TAG256_LEN); } static int valid_sha512(char *ciphertext, struct fmt_main *self) { return 
valid_common(ciphertext, self, 86, FORMAT_TAG512, FORMAT_TAG512_LEN); } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; static struct custom_salt cs; keeptr = ctcopy; memset(&cs, 0, sizeof(cs)); if ((strncmp(ciphertext, FORMAT_TAG1, FORMAT_TAG1_LEN) == 0)) { cs.type = 1; ctcopy += FORMAT_TAG1_LEN; } else if ((strncmp(ciphertext, FORMAT_TAG256, FORMAT_TAG256_LEN) == 0)) { cs.type = 256; ctcopy += FORMAT_TAG256_LEN; } else { cs.type = 512; ctcopy += FORMAT_TAG512_LEN; } p = strtokm(ctcopy, "$"); cs.iterations = 1 << atoi(p); p = strtokm(NULL, "$"); strncpy((char*)cs.salt, p, 17); MEM_FREE(keeptr); return (void *)&cs; } #define TO_BINARY(b1, b2, b3) { \ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \ ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out.c[b1] = value >> 16; \ out.c[b2] = value >> 8; \ out.c[b3] = value; } static void *get_binary(char *ciphertext) { static union { unsigned char c[LARGEST_BINARY_SIZE+3]; uint64_t dummy; } out; uint32_t value; char *pos = strrchr(ciphertext, '$') + 1; int len = strlen(pos); int i; memset(&out, 0, sizeof(out)); for (i = 0; i < len/4*3; i += 3) TO_BINARY(i, i + 1, i + 2); if (len % 3 == 1) { value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6); out.c[i] = value; } else if (len % 3 == 2) { /* sha-1, sha-256 */ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12); out.c[i++] = value >> 8; out.c[i++] = value; } return (void *)out.c; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } 
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int inc=1, index = 0; switch(cur_salt->type) { case 1: #ifdef SSE_GROUP_SZ_SHA1 inc = SSE_GROUP_SZ_SHA1; #endif break; case 256: #ifdef SSE_GROUP_SZ_SHA256 inc = SSE_GROUP_SZ_SHA256; #endif break; default: #ifdef SSE_GROUP_SZ_SHA512 inc = SSE_GROUP_SZ_SHA512; #endif break; } #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += inc) { int j = index; while (j < index + inc) { if (cur_salt->type == 1) { #ifdef SSE_GROUP_SZ_SHA1 int lens[SSE_GROUP_SZ_SHA1], i; unsigned char *pin[SSE_GROUP_SZ_SHA1]; union { uint32_t *pout[SSE_GROUP_SZ_SHA1]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) { lens[i] = strlen(saved_key[j]); pin[i] = (unsigned char*)(saved_key[j]); x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha1((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } else if (cur_salt->type == 256) { #ifdef SSE_GROUP_SZ_SHA256 int lens[SSE_GROUP_SZ_SHA256], i; unsigned char *pin[SSE_GROUP_SZ_SHA256]; union { uint32_t *pout[SSE_GROUP_SZ_SHA256]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) { lens[i] = strlen(saved_key[j]); pin[i] = (unsigned char*)saved_key[j]; x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), 
BINARY_SIZE, 0); #else pbkdf2_sha256((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } else { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char *pin[SSE_GROUP_SZ_SHA512]; union { uint32_t *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = strlen(saved_key[j]); pin[i] = (unsigned char*)saved_key[j]; x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha512((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; //dump_stuff_msg("\nbinary ", binary, CMP_SIZE); for (; index < count; index++) { //dump_stuff_msg("crypt_out", crypt_out[index], CMP_SIZE); if (!memcmp(binary, crypt_out[index], CMP_SIZE-2)) return 1; } return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], CMP_SIZE-2); } static int cmp_exact(char *source, int index) { return 1; } static void aixssha_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } /* report iteration count as tunable cost value */ static unsigned int aixssha_iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_aixssha1 = { { FORMAT_LABEL_SHA1, FORMAT_NAME_SHA1, ALGORITHM_NAME_SHA1, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, 
PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 SSE_GROUP_SZ_SHA1, SSE_GROUP_SZ_SHA1, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG1 }, aixssha_tests1 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha1, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_aixssha256 = { { FORMAT_LABEL_SHA256, FORMAT_NAME_SHA256, ALGORITHM_NAME_SHA256, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 SSE_GROUP_SZ_SHA256, SSE_GROUP_SZ_SHA256, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG256 }, aixssha_tests256 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha256, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_aixssha512 = { { FORMAT_LABEL_SHA512, FORMAT_NAME_SHA512, ALGORITHM_NAME_SHA512, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, 
BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_64 SSE_GROUP_SZ_SHA512, SSE_GROUP_SZ_SHA512, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG512 }, aixssha_tests512 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha512, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
task_late_fulfill.c
// RUN: %libarcher-compile -fopenmp-version=50 && env OMP_NUM_THREADS='3' \ // RUN: %libarcher-run-race | FileCheck %s // Checked gcc 9.2 still does not support detach clause on task construct. // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9 // clang supports detach clause since version 11. // UNSUPPORTED: clang-10, clang-9, clang-8, clang-7 // icc compiler does not support detach clause. // UNSUPPORTED: icc // REQUIRES: tsan #include <omp.h> #include <stdio.h> #include <unistd.h> int main() { #pragma omp parallel #pragma omp master { omp_event_handle_t event; int a = 0, b = 0; omp_event_handle_t *f_event; #pragma omp task detach(event) depend(out : f_event) shared(f_event) { printf("%i: task 1\n", omp_get_thread_num()); f_event = &event; } usleep(10000); #pragma omp task depend(in : f_event) shared(f_event, a, b) { printf("%i: task 2, %p, %i, %i\n", omp_get_thread_num(), f_event, a, b); f_event = &event; } usleep(10000); a++; printf("%i: calling omp_fulfill_event\n", omp_get_thread_num()); omp_fulfill_event(*f_event); //#pragma omp task if (0) depend(in : f_event) // {} b++; usleep(10000); #pragma omp taskwait } return 0; } // no race for a++ in line 32: // CHECK-NOT: #0 {{.*}}task_late_fulfill.c:35 // we expect a race on f_event: // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 8 // CHECK-NEXT: #0 {{.*}}task_late_fulfill.c:37 // CHECK: Previous write of size 8 // CHECK-NEXT: #0 {{.*}}task_late_fulfill.c:26 // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task_late_fulfill.c:31 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task_late_fulfill.c:40