/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%              EEEEE  N   N  H   H   AAA   N   N   CCCC  EEEEE                %
%              E      NN  N  H   H  A   A  NN  N  C      E                    %
%              EEE    N N N  HHHHH  AAAAA  N N N  C      EEE                  %
%              E      N  NN  H   H  A   A  N  NN  C      E                    %
%              EEEEE  N   N  H   H  A   A  N   N   CCCC  EEEEE                %
%                                                                             %
%                                                                             %
%                    MagickCore Image Enhancement Methods                     %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1992                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/xml-tree.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o G a m m a I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoGammaImage() extracts the 'mean' from the image and adjusts the image
%  gamma to try to set it appropriately.
%
%  The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image)
%      MagickBooleanType AutoGammaImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set, all given channels are adjusted in the same way using the
%      mean average of those channels.
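%
%  A minimal usage sketch (the variable `image' and the error handling shown
%  are illustrative assumptions, not part of this method):
%
%      if (AutoGammaImage(image) == MagickFalse)
%        CatchException(&image->exception);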
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  return(AutoGammaImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
  const ChannelType channel)
{
  double
    gamma,
    mean,
    logmean,
    sans;

  MagickStatusType
    status;

  logmean=log(0.5);
  if ((channel & SyncChannels) != 0)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  if ((channel & RedChannel) != 0)
    {
      (void) GetImageChannelMean(image,RedChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & GreenChannel) != 0)
    {
      (void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & BlueChannel) != 0)
    {
      (void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      (void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
        gamma);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the AutoLevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image)
%      MagickBooleanType AutoLevelImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set, the combined min/max of all given channels is used so
%      that every given channel is stretched in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  return(AutoLevelImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    Convenience method for a min/max histogram stretch.
  */
  return(MinMaxStretchImage(image,channel,0.0,0.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B r i g h t n e s s C o n t r a s t I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.
%  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply them to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  MagickBooleanType
    status;

  status=BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast);
  return(status);
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContrastImageTag  "BrightnessContrast/Image"

  double
    alpha,
    intercept,
    coefficients[2],
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%      <ColorCorrection id="cc03345">
%        <SOPNode>
%          <Slope> 0.9 1.2 0.5 </Slope>
%          <Offset> 0.4 -0.5 0.6 </Offset>
%          <Power> 1.0 0.8 1.5 </Power>
%        </SOPNode>
%        <SATNode>
%          <Saturation> 0.85 </Saturation>
%        </SATNode>
%      </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
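%
%  A minimal usage sketch (assumes `ccc' holds a NUL-terminated string with
%  XML like the sample above; error handling abbreviated):
%
%      if (ColorDecisionListImage(image,ccc) == MagickFalse)
%        CatchException(&image->exception);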
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); exception=(&image->exception); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { 
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power)))));
    cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power)))));
    cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power)))));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Apply transfer function to colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          luma;

        luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+
          0.072186*image->colormap[i].blue;
        image->colormap[i].red=ClampToQuantum(luma+
          color_correction.saturation*(cdl_map[ScaleQuantumToMap(
          image->colormap[i].red)].red-luma));
        image->colormap[i].green=ClampToQuantum(luma+
          color_correction.saturation*(cdl_map[ScaleQuantumToMap(
          image->colormap[i].green)].green-luma));
        image->colormap[i].blue=ClampToQuantum(luma+
          color_correction.saturation*(cdl_map[ScaleQuantumToMap(
          image->colormap[i].blue)].blue-luma));
      }
    }
  /*
    Apply transfer function to image.
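    Each channel value is pushed through its precomputed ASC CDL transfer
    map, out = (slope*in+offset)^power, and the result is blended against the
    Rec. 709 luma of the original pixel as luma+saturation*(out-luma), so a
    saturation of 1.0 keeps the corrected color while 0.0 collapses the pixel
    to its luma.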
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+
        0.072186*GetPixelBlue(q);
      SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
      SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
      SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image by using it as an
%  index to look up a replacement color value in a Color Look-Up Table in the
%  form of an image.  The values are extracted along a diagonal of the CLUT
%  image, so either a horizontal or vertical gradient image can be used.
%
%  Typically this is used either to re-color a gray-scale image according to a
%  color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in the
%  CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel, it is assumed that image is a simple
%  gray-scale image that will affect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or for
%  a histogram adjustment of existing alpha channel values.  If both images
%  have matte channels, direct and normal indexing is applied, which is rarely
%  used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,const Image *clut_image)
%      MagickBooleanType ClutImageChannel(Image *image,
%        const ChannelType channel,const Image *clut_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
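%
%  A minimal usage sketch (assumes `gradient' is a CLUT image acquired
%  elsewhere, e.g. read from a gradient file; error handling abbreviated):
%
%      if (ClutImageChannel(image,DefaultChannels,gradient) == MagickFalse)
%        CatchException(&image->exception);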
% */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image) { return(ClutImageChannel(image,DefaultChannels,clut_image)); } MagickExport MagickBooleanType ClutImageChannel(Image *image, const ChannelType channel,const Image *clut_image) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket *clut_map; ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); exception=(&image->exception); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*clut_map)); if (clut_map == (MagickPixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireAuthenticCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetMagickPixelPacket(clut_image,clut_map+i); status=InterpolateMagickPixelPacket(clut_image,clut_view, UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap, (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixelRed(clut_map+ ScaleQuantumToMap(GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixelGreen(clut_map+ ScaleQuantumToMap(GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixelBlue(clut_map+ ScaleQuantumToMap(GetPixelBlue(q)))); if ((channel & OpacityChannel) != 0) { if (clut_image->matte == MagickFalse) SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+ ScaleQuantumToMap((Quantum) GetPixelAlpha(q)))); else if (image->matte == MagickFalse) SetPixelOpacity(q,ClampPixelOpacity(clut_map+ ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel)))); else SetPixelOpacity(q,ClampPixelOpacity( clut_map+ScaleQuantumToMap(GetPixelOpacity(q)))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t) GetPixelIndex(indexes+x))->index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if 
(image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to MagickTrue to increase the
%  image contrast; otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
*/

static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become lighter.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
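    Contrast() above works in HSB space: brightness is nudged along a
    sinusoid centered on mid-gray,
    b' = b + 0.5*sign*(0.5*(sin(MagickPI*(b-0.5))+1.0)-b),
    so with sign=1 (sharpen) values below 0.5 move toward 0.0 and values
    above 0.5 move toward 1.0, while sign=-1 reverses the adjustment and
    reduces contrast.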
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  status=AccelerateContrastImage(image,sharpen,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t S t r e t c h I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastStretchImage() is a simple image enhancement technique that
%  attempts to improve the contrast in an image by `stretching' the range of
%  intensity values it contains to span a desired range of values.  It
%  differs from the more sophisticated histogram equalization in that it can
%  only apply a linear scaling function to the image pixel values.  As a
%  result the `enhancement' is less harsh.
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const char *levels)
%      MagickBooleanType ContrastStretchImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const char *levels)
{
  double
    black_point,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
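    The levels string is parsed as a geometry: rho gives the black point and
    sigma the white point (e.g. "1%" or "10x90%"); when no sigma is given,
    the white point defaults to the complement of the black point.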
*/ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) image->columns*image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) QuantumRange/100.0; white_point*=(double) QuantumRange/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, white; QuantumPixelPacket *stretch_map; ssize_t i; ssize_t y; /* Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) && 0 /* Call OpenCL version */ status=AccelerateContrastStretchImageChannel(image,channel,black_point, white_point,&image->exception); if (status != MagickFalse) return status; #endif histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (QuantumPixelPacket *) NULL)) { if (stretch_map != (QuantumPixelPacket *) NULL) stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
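    Each pixel (or its intensity, when SyncChannels is set) is binned into one
    of MaxMap+1 per-channel counters.  The black boundary is then found by
    accumulating counts from the bottom of the histogram until black_point is
    exceeded, and the white boundary by accumulating from the top until
    columns*rows-white_point is exceeded.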
*/ if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace); status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; IndexPacket *magick_restrict indexes; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=ClampToQuantum(GetPixelIntensity(image,p)); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels. */ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { 
intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=(Quantum) 0; else if (i > (ssize_t) white.red) stretch_map[i].red=QuantumRange; else if (black.red != white.red) stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0; else if (i > (ssize_t) white.green) stretch_map[i].green=QuantumRange; else if (black.green != white.green) stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.green)/(white.green-black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0; else if (i > (ssize_t) white.blue) stretch_map[i].blue= QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.blue)/(white.blue-black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.opacity)/(white.opacity-black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0; else if (i > (ssize_t) white.index) stretch_map[i].index=QuantumRange; else if (black.index != white.index) stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.index)/(white.index-black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red; } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green; } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } } /* Stretch image. 
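    Each selected channel is now pushed through stretch_map: values below the
    black boundary clamp to 0, values above the white boundary clamp to
    QuantumRange, and everything between is rescaled linearly.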
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
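%
%  A minimal usage sketch (assumes `image' and `exception' were acquired
%  elsewhere, e.g. via ReadImage() and AcquireExceptionInfo(); the source
%  image must be at least 5x5 pixels, as required below):
%
%      Image
%        *enhanced;
%
%      enhanced=EnhanceImage(image,exception);
%      if (enhanced == (Image *) NULL)
%        CatchException(exception);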
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelOpacity(r)+pixel.opacity)/2.0; \ distance=QuantumScale*((double) GetPixelOpacity(r)-pixel.opacity); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(r); \ aggregate.green+=(weight)*GetPixelGreen(r); \ aggregate.blue+=(weight)*GetPixelBlue(r); \ aggregate.opacity+=(weight)*GetPixelOpacity(r); \ total_weight+=(weight); \ } \ r++; #define EnhanceImageTag "Enhance/Image" CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns < 5) || (image->rows < 5)) return((Image *) NULL); enhance_image=CloneImage(image,0,0,MagickTrue,exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse) { InheritException(exception,&enhance_image->exception); enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. */ status=MagickTrue; progress=0; (void) memset(&zero,0,sizeof(zero)); image_view=AcquireAuthenticCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; PixelPacket *magick_restrict q; ssize_t x; /* Read another scan line. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; MagickPixelPacket aggregate; PixelPacket pixel; const PixelPacket *magick_restrict r; /* Compute weighted average of target pixel color components. 
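    A 5x5 weighted kernel is applied below (weights 5..80, favoring the
    center pixel); via the EnhancePixel() macro a neighbor contributes only
    when its mean-weighted color distance from the center stays under the
    0.069 threshold, so strong edges are preserved while low-amplitude noise
    is averaged away.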
*/ aggregate=zero; total_weight=0.0; r=p+2*(image->columns+4)+2; pixel=(*r); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight); SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight); SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight); SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/ total_weight); } p++; q++; } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image) % MagickBooleanType EqualizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType EqualizeImage(Image *image) { return(EqualizeImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType EqualizeImageChannel(Image *image, const ChannelType channel) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, intensity, *map, white; QuantumPixelPacket *equalize_map; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) /* Call OpenCL version */ status=AccelerateEqualizeImage(image,channel,&image->exception); if (status != MagickFalse) return status; #endif /* Allocate and initialize histogram arrays. 
*/ equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map)); if ((equalize_map == (QuantumPixelPacket *) NULL) || (histogram == (MagickPixelPacket *) NULL) || (map == (MagickPixelPacket *) NULL)) { if (map != (MagickPixelPacket *) NULL) map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (equalize_map != (QuantumPixelPacket *) NULL) equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory( equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const IndexPacket *magick_restrict indexes; const PixelPacket *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. 
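    Summing the histogram produces the cumulative distribution function;
    equalize_map[i] then rescales (map[i]-map[0]) over (map[MaxMap]-map[0])
    to the full quantum range, spreading the occupied intensity levels
    evenly.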
*/ (void) memset(&intensity,0,sizeof(intensity)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { intensity.red+=histogram[i].red; map[i]=intensity; continue; } if ((channel & RedChannel) != 0) intensity.red+=histogram[i].red; if ((channel & GreenChannel) != 0) intensity.green+=histogram[i].green; if ((channel & BlueChannel) != 0) intensity.blue+=histogram[i].blue; if ((channel & OpacityChannel) != 0) intensity.opacity+=histogram[i].opacity; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) intensity.index+=histogram[i].index; map[i]=intensity; } black=map[0]; white=map[(int) MaxMap]; (void) memset(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); if (((channel & GreenChannel) != 0) && (white.green != black.green)) equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].green-black.green))/(white.green-black.green))); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].blue-black.blue))/(white.blue-black.blue))); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].opacity-black.opacity))/(white.opacity-black.opacity))); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].index-black.index))/(white.index-black.index))); } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { /* Equalize colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].red; image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].red; image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].red; } continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; if (((channel & GreenChannel) != 0) && (white.green != black.green)) image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].green; if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } /* Equalize image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].red); SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].red); SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].red); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].red); } q++; continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); if (((channel & GreenChannel) != 0) && (white.green != black.green)) SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. % % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const char *level) % MagickBooleanType GammaImageChannel(Image *image, % const ChannelType channel,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. 
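%
%  A minimal usage sketch (the gamma value 2.2, which brightens mid-tones, is
%  an illustrative assumption; error handling abbreviated):
%
%      if (GammaImageChannel(image,DefaultChannels,2.2) == MagickFalse)
%        CatchException(&image->exception);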
% */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? value : pow(value,gamma)); } MagickExport MagickBooleanType GammaImage(Image *image,const char *level) { GeometryInfo geometry_info; MagickPixelPacket gamma; MagickStatusType flags, status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (level == (char *) NULL) return(MagickFalse); flags=ParseGeometry(level,&geometry_info); gamma.red=geometry_info.rho; gamma.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) gamma.green=gamma.red; gamma.blue=geometry_info.xi; if ((flags & XiValue) == 0) gamma.blue=gamma.red; if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0)) return(MagickTrue); if ((gamma.red == gamma.green) && (gamma.green == gamma.blue)) status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel | BlueChannel),(double) gamma.red); else { status=GammaImageChannel(image,RedChannel,(double) gamma.red); status&=GammaImageChannel(image,GreenChannel,(double) gamma.green); status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue); } return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType GammaImageChannel(Image *image, const ChannelType channel,const double gamma) { #define GammaImageTag "Gamma/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*pow((double) i/MaxMap, PerceptibleReciprocal(gamma))))); if (image->storage_class == PseudoClass) { /* Gamma-correct colormap. 
*/
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,PerceptibleReciprocal(gamma));
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,PerceptibleReciprocal(gamma));
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,PerceptibleReciprocal(gamma));
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,PerceptibleReciprocal(gamma));
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),
                PerceptibleReciprocal(gamma));
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            PerceptibleReciprocal(gamma)));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            PerceptibleReciprocal(gamma)));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            PerceptibleReciprocal(gamma)));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              PerceptibleReciprocal(gamma)));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),PerceptibleReciprocal(gamma)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              PerceptibleReciprocal(gamma)));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),PerceptibleReciprocal(gamma)));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),PerceptibleReciprocal(gamma)));
            }
        }
#endif
      q++;
    }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the colors in the reference image to gray.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the pixel intensity method.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image.
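    Luma methods (Rec601Luma/Rec709Luma) expect gamma-encoded RGB and encode
    linear input first; luminance methods (Rec601Luminance/Rec709Luminance)
    expect linear RGB and decode sRGB input first.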
*/ /* call opencl version */ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image,method,&image->exception) != MagickFalse) { image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace)); return(SetImageColorspace(image,GRAYColorspace)); } #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, intensity, red; red=(MagickRealType) q->red; green=(MagickRealType) q->green; blue=(MagickRealType) q->blue; intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/(3.0*QuantumRange)); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(q,ClampToQuantum(intensity)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace)); return(SetImageColorspace(image,GRAYColorspace)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a 
g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image) % MagickBooleanType HaldClutImageChannel(Image *image, % const ChannelType channel,Image *hald_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o channel: the channel. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
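    For a Hald CLUT written by the HALD coder at order n (an n*n*n pixel
    square), the loop below recovers n, so after squaring 'level' is the
    per-channel resolution (n*n) and 'cube_size' (level*level) is the offset
    between successive blue planes.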
*/
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetMagickPixelPacket(hald_image,&zero);
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,hald_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      area,
      offset;

    HaldInfo
      point;

    MagickPixelPacket
      pixel,
      pixel1,
      pixel2,
      pixel3,
      pixel4;

    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    pixel1=zero;
    pixel2=zero;
    pixel3=zero;
    pixel4=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      point.x=QuantumScale*(level-1.0)*GetPixelRed(q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(q);
      offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z));
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),
        floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      area=point.y;
      if (hald_image->interpolate == NearestNeighborInterpolatePixel)
        area=(point.y < 0.5) ? 0.0 : 1.0;
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,area,&pixel3);
      offset+=cube_size;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),
        floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,area,&pixel4);
      area=point.z;
      if (hald_image->interpolate == NearestNeighborInterpolatePixel)
        area=(point.z < 0.5) ?
0.0 : 1.0;
      MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4,
        pixel4.opacity,area,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(pixel.index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() adjusts the levels of a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  The white point specifies the lightest color
%  in the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See LevelizeImage()
%  and LevelizeImageChannel(), below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
%      A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
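    For example, "10%,90%,1.5" stretches the 10%-90% band of the quantum
    range to full range and then applies a gamma of 1.5; a '!' flag
    (AspectValue) routes to LevelizeImage() instead.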
*/
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  if ((flags & AspectValue) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() applies the normal level operation to the image, spreading
%  out the values between the black and white points over the entire range of
%  values.  Gamma correction is also applied after the values have been
%  mapped.
%
%  It is typically used to improve image contrast, or to provide a controlled
%  linear threshold for the image.  If the black and white points are set to
%  the minimum and maximum values found in the image, the image can be
%  normalized, or, by swapping the black and white values, negated.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma)
%      MagickBooleanType LevelImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantumRange (white)
%
%    o gamma: adjust gamma by this factor before mapping values.
%      use 1.0 for purely linear stretching of image color values
%
*/

static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    level_pixel,
    scale;

  scale=PerceptibleReciprocal(white_point-black_point);
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),
    PerceptibleReciprocal(gamma));
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
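        Each value v maps to QuantumRange*
        ((v-black_point)/(white_point-black_point))^(1/gamma), as computed by
        LevelPixel() above, then clamps to the quantum range.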
*/
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].blue));
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,
          gamma,(MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,
          gamma,(MagickRealType) GetPixelBlue(q))));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,
          gamma,(MagickRealType) GetPixelAlpha(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImageChannel() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma
%  is applied before the values are mapped.
%
%  LevelizeImageChannel() can be called by using a +level command-line
%  option, or by using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used, for example, to de-contrast a greyscale image to the exact
%  levels specified.
Or by using specific levels for each channel of an image % you can convert a gray-scale image to any linear color gradient, according % to those levels. % % The format of the LevelizeImageChannel method is: % % MagickBooleanType LevelizeImageChannel(Image *image, % const ChannelType channel,const char *levels) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. % */ MagickExport MagickBooleanType LevelizeImage(Image *image, const double black_point,const double white_point,const double gamma) { MagickBooleanType status; status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point, gamma); return(status); } MagickExport MagickBooleanType LevelizeImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point, const double gamma) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=LevelizeValue(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=LevelizeValue(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=LevelizeValue(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue( QuantumRange-image->colormap[i].opacity)); } /* Level image. 
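    LevelizeValue() is the inverse mapping: (QuantumScale*v)^gamma is scaled
    into [black_point,white_point], compressing rather than stretching the
    range.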
*/
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l C o l o r s I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelColorsImage() maps the given color to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by
%  channel basis, as per LevelImage().  The given colors allow you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in
%  the reverse direction.  That is, any existing "black" and "white" colors
%  in the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a greyscale gradient
%  into the given color gradient.
%
%  The format of the LevelColorsImageChannel method is:
%
%      MagickBooleanType LevelColorsImage(Image *image,
%        const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%      MagickBooleanType LevelColorsImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_color: The color to map black to/from.
%
%    o white_color: The color to map white to/from.
%
%    o invert: if MagickTrue, map black and white to the given colors
%      (levelize) rather than mapping the given colors to black and white
%      (level).
%
*/
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  MagickBooleanType
    status;

  status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color,
    invert);
  return(status);
}

MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      if ((channel & RedChannel) != 0)
        status&=LevelImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  else
    {
      if ((channel & RedChannel) != 0)
        status&=LevelizeImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelizeImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelizeImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelizeImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i n e a r S t r e t c h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() discards any pixels below the black point and above
%  the white point and levels the remaining pixels.
%
%  The format of the LinearStretchImage method is:
%
%      MagickBooleanType LinearStretchImage(Image *image,
%        const double black_point,const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
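%
%  For example (an illustrative call, not from this file), to clip the
%  darkest 2% and brightest 1% of pixels by count:
%
%      (void) LinearStretchImage(image,0.02*image->columns*image->rows,
%        0.01*image->columns*image->rows);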
% */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point) { #define LinearStretchImageTag "LinearStretch/Image" ExceptionInfo *exception; MagickBooleanType status; MagickRealType *histogram, intensity; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); exception=(&image->exception); histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); if (histogram == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *magick_restrict p; ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=(ssize_t) image->columns-1; x >= 0; x--) { histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,p)))]++; p++; } } /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(MagickRealType *) RelinquishMagickMemory(histogram); status=LevelImageChannel(image,DefaultChannels,(double) ScaleMapToQuantum(black),(double) ScaleMapToQuantum(white),1.0); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and % hue. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
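    As in the other Modulate helpers, a percent of 100 leaves a component
    unchanged; fmod(percent_hue-100.0,200.0)/200.0 shifts hue by up to half a
    rotation in either direction.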
*/ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,Quantum *red, Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,Quantum *red, Quantum *green,Quantum *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,Quantum *red, Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,Quantum *red, Quantum *green,Quantum *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,Quantum *red, Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; /* Modulate image colormap. */ red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: case LCHColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
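    The working space is chosen via the "modulate:colorspace" artifact parsed
    above; HSL is the default when the artifact is absent or unrecognized.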
*/
  /* call opencl version */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  status=AccelerateModulateImage(image,percent_brightness,percent_hue,
    percent_saturation,colorspace,&image->exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      Quantum
        blue,
        green,
        red;

      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        case LCHColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     N e g a t e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NegateImage() negates the colors in the reference image.  The grayscale
%  option means that only grayscale values within the image are negated.
%
%  The format of the NegateImageChannel method is:
%
%      MagickBooleanType NegateImage(Image *image,
%        const MagickBooleanType grayscale)
%      MagickBooleanType NegateImageChannel(Image *image,
%        const ChannelType channel,const MagickBooleanType grayscale)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o grayscale: If MagickTrue, only negate grayscale pixels within the image.
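%
%  For example (illustrative only), NegateImage(image,MagickFalse) negates
%  every channel of every pixel, while NegateImageChannel(image,RedChannel,
%  MagickFalse) negates only the red channel.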
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale)
{
  MagickBooleanType
    status;

  status=NegateImageChannel(image,DefaultChannels,grayscale);
  return(status);
}

MagickExport MagickBooleanType NegateImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType grayscale)
{
#define NegateImageTag  "Negate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Negate colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (grayscale != MagickFalse)
          if ((image->colormap[i].red != image->colormap[i].green) ||
              (image->colormap[i].green != image->colormap[i].blue))
            continue;
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        IndexPacket
          *magick_restrict indexes;

        PixelPacket
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((GetPixelRed(q) != GetPixelGreen(q)) ||
              (GetPixelGreen(q) != GetPixelBlue(q)))
            {
              q++;
              continue;
            }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange-GetPixelRed(q));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
  /*
    Negate image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      *magick_restrict indexes;

    PixelPacket
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (channel == DefaultChannels)
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
      }
    else
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x));
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x));
        if ((channel & OpacityChannel) != 0)
          SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x));
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     N o r m a l i z e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The NormalizeImage() method enhances the contrast of a color image by
%  mapping the darkest 0.15 percent of all pixels to black and the brightest
%  0.05 percent to white, per the thresholds in NormalizeImageChannel().
%
%  The format of the NormalizeImage method is:
%
%      MagickBooleanType NormalizeImage(Image *image)
%      MagickBooleanType NormalizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  MagickBooleanType
    status;

  status=NormalizeImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    black_point,
    white_point;

  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImageChannel(image,channel,black_point,white_point));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S i g m o i d a l C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
%  sigmoidal contrast algorithm.  Increase the contrast of the image using a
%  sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels) % MagickBooleanType SigmoidalContrastImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType sharpen, % const double contrast,const double midpoint) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % */ /* ImageMagick 7 has a version of this function which does not use LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? 
-1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; ssize_t i; ssize_t y; /* Side effect: clamps values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Allocate and initialize sigmoidal maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); if (sharpen != MagickFalse) for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); else for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) ( MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } /* Sigmoidal-contrast enhance image. 
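    Pixels are remapped through sigmoidal_map, built above with
    ScaledSigmoidal() when sharpening or InverseScaledSigmoidal() when
    reducing contrast.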
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
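/*
  Editor's sketch (not part of ImageMagick; all names here are hypothetical):
  a standalone check of the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 quoted
  above, and of the ScaledSigmoidal() normalization.  Because the tanh kernel
  is an affine transformation of the logistic kernel, the two scaled versions
  agree to rounding error, which is why the LUT code above may use either.
  Build with: cc sigmoidal_demo.c -lm
*/
#include <math.h>
#include <stdio.h>

static double sig_logistic(double a,double b,double x)
{ return(1.0/(1.0+exp(a*(b-x)))); }          /* logistic kernel */

static double sig_tanh(double a,double b,double x)
{ return(tanh((0.5*a)*(x-b))); }             /* tanh kernel, 0.5 as above */

static double scaled(double (*s)(double,double,double),double a,double b,
  double x)                                  /* ScaledSigmoidal analogue */
{ return((s(a,b,x)-s(a,b,0.0))/(s(a,b,1.0)-s(a,b,0.0))); }

int main(void)
{
  const double a=6.0, b=0.5;                 /* contrast 6, midpoint 0.5 */
  double x;
  for (x=0.0; x <= 1.0; x+=0.25)
    printf("x=%.2f logistic=%.15f tanh=%.15f\n",x,
      scaled(sig_logistic,a,b,x),scaled(sig_tanh,a,b,x));
  return(0);
}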
softmax_hcl_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haoluo@openailab.com */ #include "softmax_param.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "module/module.h" #include "operator/op.h" #include "utility/sys_port.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <math.h> #include <string.h> #include <arm_neon.h> static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; struct tensor* output_tensor; int ret = 0; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); if (input_tensor->dims[0] != output_tensor->dims[0] || input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2] || input_tensor->dims[3] != output_tensor->dims[3]) ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num); return ret; } static inline float32x4_t vexpq10_f32(float32x4_t x) { x = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); // n = 10 x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); return x; } static void GetMaxArray(float* input, float* array, int in_size, int on_size, int num_thread) { float* input_ptr = ( float* )input; float* array_ptr = ( float* )array; memset(array, 0, in_size * sizeof(float)); /* seed the running max at 0; this only affects numerical stability, not the softmax result */ // #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < on_size; j++) { // #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < (in_size & -4); i += 4) { float32x4_t _p = vld1q_f32(array_ptr + i); float32x4_t _in = vld1q_f32(input_ptr + j * in_size + i); _p = vmaxq_f32(_p, _in); /* elementwise running max, matching the scalar tail loop below */ vst1q_f32(array_ptr + i, _p); } for (int i = in_size & ~3; i < in_size; i++) { if (array_ptr[i] < input_ptr[j * in_size + i]) array_ptr[i] = input_ptr[j * in_size + i]; } /* for(int l = 0; l < in_size; l++) { if(array_ptr[l] < input_ptr[j * in_size + l]) array_ptr[l] = input_ptr[j * in_size + l]; } */ } } static void GetOutResult(float* input, float* output, float* maxarray, float* sum_array, int in_size, int on_size, int num_thread) { float* input_ptr = ( float* )input; float* output_ptr = ( float* )output; float* maxarray_ptr = ( float* )maxarray; float* sum_array_ptr = ( float* )sum_array; memset(sum_array, 0x0, in_size *
sizeof(float)); /* get the exp and the summary */ // #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < on_size; j++) { // #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < (in_size & -4); i += 4) { int index = j * in_size + i; float32x4_t out = vexpq10_f32(vsubq_f32(vld1q_f32(input_ptr + index), vld1q_f32(maxarray_ptr + i))); float32x4_t sum = vaddq_f32(vld1q_f32(sum_array_ptr + i), out); vst1q_f32(output_ptr + index, out); vst1q_f32(sum_array_ptr + i, sum); } for (int i = in_size & ~3; i < in_size; i++) { int index = j * in_size + i; output_ptr[index] = exp(input_ptr[index] - maxarray_ptr[i]); sum_array_ptr[i] += output_ptr[index]; } } /* for(int l = 0; l < in_size; l++) { int index = j * in_size + l; output_ptr[index] = exp(input_ptr[index] - array_ptr[l]); sum_array_ptr[l] += output_ptr[index]; } */ /* the final result */ for (int j = 0; j < on_size; j++) for (int l = 0; l < in_size; l++) { int index = j * in_size + l; output_ptr[index] /= sum_array_ptr[l]; } } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; struct tensor* output_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct softmax_param* softmax_param = ( struct softmax_param* )ir_node->op.param_mem; int element_size = input_tensor->elem_size; int dims[4]; for (int i = 0; i < input_tensor->dim_num; i++) { dims[i] = input_tensor->dims[i]; } int axis = softmax_param->axis; int out_size, in_size, on_size; out_size = 1; for (int i = 0; i < axis; i++) { out_size *= dims[i]; } in_size = 1; for (size_t i = axis + 1; i < input_tensor->dim_num; i++) { in_size *= dims[i]; } on_size = dims[axis]; uint8_t* input = input_tensor->data; uint8_t* output = output_tensor->data; float* max_array = ( float* )malloc(in_size * sizeof(float)); float* sum_array = ( float* )malloc(in_size * sizeof(float)); int on_in_size = on_size * in_size; float* input_f = NULL; float* output_f = NULL; if (element_size == 1) { input_f = ( float* )malloc(on_in_size * 4); output_f = ( float* )malloc(on_in_size * 4); /* todo */ free(input_f); free(output_f); } for (int i = 0; i < out_size; i++) { /* get max */ int img_base = i * on_in_size * element_size; GetMaxArray(( float* )(input + img_base), max_array, in_size, on_size, exec_graph->num_thread); GetOutResult(( float* )(input + img_base), ( float* )(output + img_base), max_array, sum_array, in_size, on_size, exec_graph->num_thread); } free(max_array); free(sum_array); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node) { struct node* ir_node = exec_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); /* todo support uint8 */ if (input_tensor->data_type != TENGINE_DT_FP32) return 0; return OPS_SCORE_BEST; } static struct node_ops hcl_node_ops = {.prerun = prerun, .run = run, .reshape = reshape, .postrun = 
NULL, .init_node = init_node, .release_node = release_node, .score = score}; int register_softmax_hcl_arm_op(void* arg) { return register_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops); } int unregister_softmax_hcl_arm_op(void* arg) { return unregister_builtin_node_ops(OP_SOFTMAX, &hcl_node_ops); }
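/*
  Editor's sketch (not part of Tengine; names are hypothetical): a scalar
  equivalent of vexpq10_f32() above.  It evaluates exp(x) ~ (1 + x/2^10)^(2^10)
  with one multiply-add followed by ten squarings, which is exactly what the
  NEON code does per lane (0.0009765625f == 1/1024).  After the running max
  is subtracted in GetOutResult(), arguments are <= 0, where the approximation
  is well behaved; relative error grows as x moves away from 0.
  Build with: cc exp_demo.c -lm
*/
#include <math.h>
#include <stdio.h>

static float exp10_approx(float x)
{
    float y = 1.0f + x * 0.0009765625f;   /* 1 + x/1024 */
    for (int i = 0; i < 10; i++)          /* ten squarings -> power 1024 */
        y *= y;
    return y;
}

int main(void)
{
    for (float x = 0.0f; x >= -8.0f; x -= 2.0f)
        printf("x = %5.1f  approx = %.6e  expf = %.6e\n", x, exp10_approx(x), expf(x));
    return 0;
}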
GB_AxB_saxpy3_template.c
//------------------------------------------------------------------------------ // GB_AxB_saxpy3_template: C=A*B, C<M>=A*B, or C<!M>=A*B via saxpy3 method //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // GB_AxB_saxpy3_template.c computes C=A*B for any semiring and matrix types, // where C is sparse or hypersparse. #include "GB_unused.h" //------------------------------------------------------------------------------ // template code for C=A*B via the saxpy3 method //------------------------------------------------------------------------------ { // double ttt = omp_get_wtime ( ) ; //-------------------------------------------------------------------------- // get the chunk size //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // get M, A, B, and C //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; // const int64_t *restrict Ch = C->h ; const int64_t cvlen = C->vlen ; const int64_t cnvec = C->nvec ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int8_t *restrict Bb = B->b ; const int64_t *restrict Bi = B->i ; const bool B_iso = B->iso ; const int64_t bvlen = B->vlen ; const bool B_jumbled = B->jumbled ; const bool B_is_sparse = GB_IS_SPARSE (B) ; const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ; const bool B_is_bitmap = GB_IS_BITMAP (B) ; const bool B_is_sparse_or_hyper = B_is_sparse || B_is_hyper ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int8_t *restrict Ab = A->b ; const int64_t *restrict Ai = A->i ; const int64_t anvec = A->nvec ; const int64_t avlen = A->vlen ; const bool A_is_sparse = GB_IS_SPARSE (A) ; const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ; const bool A_is_bitmap = GB_IS_BITMAP (A) ; const bool A_iso = A->iso ; const bool A_jumbled = A->jumbled ; const bool A_ok_for_binary_search = ((A_is_sparse || A_is_hyper) && !A_jumbled) ; #if ( !GB_NO_MASK ) const int64_t *restrict Mp = M->p ; const int64_t *restrict Mh = M->h ; const int8_t *restrict Mb = M->b ; const int64_t *restrict Mi = M->i ; const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ; const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ; const bool M_is_bitmap = GB_IS_BITMAP (M) ; const bool M_jumbled = GB_JUMBLED (M) ; size_t msize = M->type->size ; int64_t mnvec = M->nvec ; int64_t mvlen = M->vlen ; #endif #if !GB_A_IS_PATTERN const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ; #endif #if !GB_B_IS_PATTERN const GB_BTYPE *restrict Bx = (GB_BTYPE *) B->x ; #endif //========================================================================== // phase2: numeric work for fine tasks //========================================================================== // Coarse tasks: nothing to do in phase2. // Fine tasks: compute nnz (C(:,j)), and values in Hx via atomics. 
int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < nfine ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kk = SaxpyTasks [taskid].vector ; int team_size = SaxpyTasks [taskid].team_size ; int64_t hash_size = SaxpyTasks [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; int64_t pB = SaxpyTasks [taskid].start ; int64_t pB_end = SaxpyTasks [taskid].end + 1 ; int64_t pleft = 0, pright = anvec-1 ; int64_t j = GBH (Bh, kk) ; GB_GET_T_FOR_SECONDJ ; #if !GB_IS_ANY_PAIR_SEMIRING GB_CTYPE *restrict Hx = (GB_CTYPE *) SaxpyTasks [taskid].Hx ; #endif #if GB_IS_PLUS_FC32_MONOID float *restrict Hx_real = (float *) Hx ; float *restrict Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *restrict Hx_real = (double *) Hx ; double *restrict Hx_imag = Hx_real + 1 ; #endif if (use_Gustavson) { //------------------------------------------------------------------ // phase2: fine Gustavson task //------------------------------------------------------------------ // Hf [i] == 0: unlocked, i has not been seen in C(:,j). // Hx [i] is not initialized. // M(i,j) is 0, or M is not present. // if M: Hf [i] stays equal to 0 (or 3 if locked) // if !M, or no M: C(i,j) is a new entry seen for 1st time // Hf [i] == 1: unlocked, i has not been seen in C(:,j). // Hx [i] is not initialized. M is present. // M(i,j) is 1. (either M or !M case) // if M: C(i,j) is a new entry seen for the first time. // if !M: Hf [i] stays equal to 1 (or 3 if locked) // Hf [i] == 2: unlocked, i has been seen in C(:,j). // Hx [i] is initialized. This case is independent of M. // Hf [i] == 3: locked. Hx [i] cannot be accessed. int8_t *restrict Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ; #if ( GB_NO_MASK ) { // phase2: fine Gustavson task, C(:,j)=A*B(:,j) #include "GB_AxB_saxpy3_fineGus_phase2.c" } #elif ( !GB_MASK_COMP ) { // phase2: fine Gustavson task, C(:,j)<M(:,j)>=A*B(:,j) #include "GB_AxB_saxpy3_fineGus_M_phase2.c" } #else { // phase2: fine Gustavson task, C(:,j)<!M(:,j)>=A*B(:,j) #include "GB_AxB_saxpy3_fineGus_notM_phase2.c" } #endif } else { //------------------------------------------------------------------ // phase2: fine hash task //------------------------------------------------------------------ // Each hash entry Hf [hash] splits into two parts, (h,f). f // is in the 2 least significant bits. h is 62 bits, and is // the 1-based index i of the C(i,j) entry stored at that // location in the hash table. // If M is present (M or !M), and M(i,j)=1, then (i+1,1) // has been inserted into the hash table, in phase0. // Given Hf [hash] split into (h,f) // h == 0, f == 0: unlocked and unoccupied. // note that if f=0, h must be zero too. // h == i+1, f == 1: unlocked, occupied by M(i,j)=1. // C(i,j) has not been seen, or is ignored. // Hx is not initialized. M is present. // if !M: this entry will be ignored in C. // h == i+1, f == 2: unlocked, occupied by C(i,j). // Hx is initialized. M is no longer // relevant. // h == (anything), f == 3: locked. 
int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t hash_bits = (hash_size-1) ; #if ( GB_NO_MASK ) { //-------------------------------------------------------------- // phase2: fine hash task, C(:,j)=A*B(:,j) //-------------------------------------------------------------- // no mask present, or mask ignored #undef GB_CHECK_MASK_ij #include "GB_AxB_saxpy3_fineHash_phase2.c" } #elif ( !GB_MASK_COMP ) { //-------------------------------------------------------------- // phase2: fine hash task, C(:,j)<M(:,j)>=A*B(:,j) //-------------------------------------------------------------- GB_GET_M_j ; // get M(:,j) if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural const int8_t *restrict Mjb = Mb + pM_start ; #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (!Mjb [i]) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (!mij) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } } else { // M(:,j) is sparse and scattered into Hf #include "GB_AxB_saxpy3_fineHash_M_phase2.c" } } #else { //-------------------------------------------------------------- // phase2: fine hash task, C(:,j)<!M(:,j)>=A*B(:,j) //-------------------------------------------------------------- GB_GET_M_j ; // get M(:,j) if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural const int8_t *restrict Mjb = Mb + pM_start ; #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (Mjb [i]) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (mij) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } } else { // M(:,j) is sparse/hyper and scattered into Hf #include "GB_AxB_saxpy3_fineHash_notM_phase2.c" } } #endif } } // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (9, ttt) ; // ttt = omp_get_wtime ( ) ; //========================================================================== // phase3/phase4: count nnz(C(:,j)) for fine tasks, cumsum of Cp //========================================================================== GB_AxB_saxpy3_cumsum (C, SaxpyTasks, nfine, chunk, nthreads, Context) ; // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (10, ttt) ; // ttt = omp_get_wtime ( ) ; //========================================================================== // phase5: numeric phase for coarse tasks, gather for fine tasks //========================================================================== // C is iso for the ANY_PAIR semiring, and non-iso otherwise // allocate Ci and Cx int64_t cnz = Cp [cnvec] ; // set C->iso = GB_IS_ANY_PAIR_SEMIRING OK GrB_Info info = GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, GB_IS_ANY_PAIR_SEMIRING, Context) ; if (info != GrB_SUCCESS) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *restrict Ci = C->i ; #if ( !GB_IS_ANY_PAIR_SEMIRING ) GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; #endif ASSERT (C->i_size == GB_Global_memtable_size (C->i)) ; // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (11, ttt) ; // ttt = omp_get_wtime ( ) ; bool C_jumbled = false ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(||:C_jumbled) for (taskid = 0 ; taskid < ntasks ; taskid++) { 
//---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- #if !GB_IS_ANY_PAIR_SEMIRING GB_CTYPE *restrict Hx = (GB_CTYPE *) SaxpyTasks [taskid].Hx ; #endif int64_t hash_size = SaxpyTasks [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; bool task_C_jumbled = false ; if (taskid < nfine) { //------------------------------------------------------------------ // fine task: gather pattern and values //------------------------------------------------------------------ int64_t kk = SaxpyTasks [taskid].vector ; int team_size = SaxpyTasks [taskid].team_size ; int leader = SaxpyTasks [taskid].leader ; int my_teamid = taskid - leader ; int64_t pC = Cp [kk] ; if (use_Gustavson) { //-------------------------------------------------------------- // phase5: fine Gustavson task, C=A*B, C<M>=A*B, or C<!M>=A*B //-------------------------------------------------------------- // Hf [i] == 2 if C(i,j) is an entry in C(:,j) int8_t *restrict Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ; int64_t cjnz = Cp [kk+1] - pC ; int64_t istart, iend ; GB_PARTITION (istart, iend, cvlen, my_teamid, team_size) ; if (cjnz == cvlen) { // C(:,j) is dense for (int64_t i = istart ; i < iend ; i++) { Ci [pC + i] = i ; } // copy Hx [istart:iend-1] into Cx [pC+istart:pC+iend-1] GB_CIJ_MEMCPY (pC + istart, istart, iend - istart) ; } else { // C(:,j) is sparse pC += SaxpyTasks [taskid].my_cjnz ; for (int64_t i = istart ; i < iend ; i++) { if (Hf [i] == 2) { GB_CIJ_GATHER (pC, i) ; // Cx [pC] = Hx [i] Ci [pC++] = i ; } } } } else { //-------------------------------------------------------------- // phase5: fine hash task, C=A*B, C<M>=A*B, C<!M>=A*B //-------------------------------------------------------------- // (Hf [hash] & 3) == 2 if C(i,j) is an entry in C(:,j), // and the index i of the entry is (Hf [hash] >> 2) - 1. 
int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t mystart, myend ; GB_PARTITION (mystart, myend, hash_size, my_teamid, team_size) ; pC += SaxpyTasks [taskid].my_cjnz ; for (int64_t hash = mystart ; hash < myend ; hash++) { int64_t hf = Hf [hash] ; if ((hf & 3) == 2) { int64_t i = (hf >> 2) - 1 ; // found C(i,j) in hash Ci [pC] = i ; GB_CIJ_GATHER (pC, hash) ; // Cx [pC] = Hx [hash] pC++ ; } } task_C_jumbled = true ; } } else { //------------------------------------------------------------------ // numeric coarse task: compute C(:,kfirst:klast) //------------------------------------------------------------------ int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t kfirst = SaxpyTasks [taskid].start ; int64_t klast = SaxpyTasks [taskid].end ; int64_t nk = klast - kfirst + 1 ; int64_t mark = 2*nk + 1 ; if (use_Gustavson) { //-------------------------------------------------------------- // phase5: coarse Gustavson task //-------------------------------------------------------------- #if ( GB_NO_MASK ) { // phase5: coarse Gustavson task, C=A*B #include "GB_AxB_saxpy3_coarseGus_noM_phase5.c" } #elif ( !GB_MASK_COMP ) { // phase5: coarse Gustavson task, C<M>=A*B #include "GB_AxB_saxpy3_coarseGus_M_phase5.c" } #else { // phase5: coarse Gustavson task, C<!M>=A*B #include "GB_AxB_saxpy3_coarseGus_notM_phase5.c" } #endif } else { //-------------------------------------------------------------- // phase5: coarse hash task //-------------------------------------------------------------- int64_t *restrict Hi = SaxpyTasks [taskid].Hi ; int64_t hash_bits = (hash_size-1) ; #if ( GB_NO_MASK ) { //---------------------------------------------------------- // phase5: coarse hash task, C=A*B //---------------------------------------------------------- // no mask present, or mask ignored (see below) #undef GB_CHECK_MASK_ij #include "GB_AxB_saxpy3_coarseHash_phase5.c" } #elif ( !GB_MASK_COMP ) { //---------------------------------------------------------- // phase5: coarse hash task, C<M>=A*B //---------------------------------------------------------- if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural #define GB_MASK_IS_BITMAP_AND_STRUCTURAL #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (!Mjb [i]) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (!mij) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } } else { // M is sparse and scattered into Hf #include "GB_AxB_saxpy3_coarseHash_M_phase5.c" } } #else { //---------------------------------------------------------- // phase5: coarse hash task, C<!M>=A*B //---------------------------------------------------------- if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural #define GB_MASK_IS_BITMAP_AND_STRUCTURAL #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (Mjb [i]) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (mij) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } } else { // M is sparse and scattered into Hf #include "GB_AxB_saxpy3_coarseHash_notM_phase5.c" } } #endif } } C_jumbled = C_jumbled || task_C_jumbled ; } 
//-------------------------------------------------------------------------- // log the state of C->jumbled //-------------------------------------------------------------------------- C->jumbled = C_jumbled ; // C is jumbled if any task left it jumbled // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (12, ttt) ; } #undef GB_NO_MASK #undef GB_MASK_COMP
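//------------------------------------------------------------------------------
// editor's sketch: the (h,f) hash-slot packing used by the hash tasks above
//------------------------------------------------------------------------------

// Not part of SuiteSparse:GraphBLAS; names, the hash function, and the scalar
// "+=" monoid are hypothetical. Each int64 slot packs a 1-based row index
// h = i+1 above a 2-bit state f (0 = empty, 2 = occupied by C(i,j)); the
// gather step keeps slots with (Hf [hash] & 3) == 2 and recovers
// i = (Hf [hash] >> 2) - 1, as in phase5. The real kernels add masks,
// locking (f == 3), and atomics for fine tasks; this serial sketch also
// assumes the table never fills.

#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 16                            /* must be a power of 2 */
#define HASH_BITS (HASH_SIZE-1)
#define HASHF(i)  ((((i) << 2) + (i)) & HASH_BITS)  /* any cheap hash */

static int64_t Hf [HASH_SIZE] ;                 /* packed (h,f) slots */
static double  Hx [HASH_SIZE] ;                 /* accumulated values */

/* scatter/accumulate one entry C(i,j) += v, with linear probing */
static void hash_accum (int64_t i, double v)
{
    for (int64_t hash = HASHF (i) ; ; hash = (hash+1) & HASH_BITS)
    {
        if ((Hf [hash] & 3) == 0)               /* empty: claim the slot */
        {
            Hf [hash] = ((i+1) << 2) | 2 ;
            Hx [hash] = v ;
            return ;
        }
        if ((Hf [hash] >> 2) == i+1)            /* present: accumulate */
        {
            Hx [hash] += v ;
            return ;
        }
    }
}

int main (void)
{
    hash_accum (5, 1.0) ; hash_accum (9, 2.0) ; hash_accum (5, 0.5) ;
    for (int64_t hash = 0 ; hash < HASH_SIZE ; hash++)
    {
        if ((Hf [hash] & 3) == 2)               /* gather, as in phase5 */
        {
            printf ("C(%lld,j) = %g\n",
                (long long) ((Hf [hash] >> 2) - 1), Hx [hash]) ;
        }
    }
    return (0) ;
}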
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::ast_type_traits::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; /// @} /// Matches any node. 
/// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. /// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. 
/// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches public C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPublic()) /// matches 'int a;' AST_MATCHER(Decl, isPublic) { return Node.getAccess() == AS_public; } /// Matches protected C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isProtected()) /// matches 'int b;' AST_MATCHER(Decl, isProtected) { return Node.getAccess() == AS_protected; } /// Matches private C++ declarations. 
/// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPrivate()) /// matches 'int c;' AST_MATCHER(Decl, isPrivate) { return Node.getAccess() == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. 
/// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder); } /// Causes all nested matchers to be matched with the specified traversal kind. /// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(ast_type_traits::TK_IgnoreImplicitCastsAndParentheses, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(ast_type_traits::TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// would only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// would only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d.
/// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. /// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. 
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. /// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be equal to the canonical /// representation of that integral value in base 10. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants.
///
/// Example matches A, B, C
/// \code
///   enum X {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
    enumConstantDecl;

/// Matches method declarations.
///
/// Example matches y
/// \code
///   class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
    cxxMethodDecl;

/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
///   class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
    cxxConversionDecl;

/// Matches user-defined and implicitly generated deduction guides.
///
/// Example matches the deduction guide.
/// \code
///   template<typename T>
///   class X { X(int); };
///   X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
    cxxDeductionGuideDecl;

/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
///   int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;

/// Matches field declarations.
///
/// Given
/// \code
///   class X { int m; };
/// \endcode
/// fieldDecl()
///   matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;

/// Matches indirect field declarations.
///
/// Given
/// \code
///   struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
///   matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
    indirectFieldDecl;

/// Matches function declarations.
///
/// Example matches f
/// \code
///   void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
    functionDecl;

/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
///   template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
    functionTemplateDecl;

/// Matches friend declarations.
///
/// Given
/// \code
///   class X { friend void foo(); };
/// \endcode
/// friendDecl()
///   matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;

/// Matches statements.
///
/// Given
/// \code
///   { ++a; }
/// \endcode
/// stmt()
///   matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;

/// Matches declaration statements.
///
/// Given
/// \code
///   int a;
/// \endcode
/// declStmt()
///   matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;

/// Matches member expressions.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     int a; static int b;
///   };
/// \endcode
/// memberExpr()
///   matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;

/// Matches unresolved member expressions.
///
/// Given
/// \code
///   struct X {
///     template <class T> void f();
///     void g();
///   };
///   template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
///   matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
    unresolvedMemberExpr;

/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
///   template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
///   matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXDependentScopeMemberExpr>
    cxxDependentScopeMemberExpr;

/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
///   X x;
///   x.y();
///   y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;

/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
///   namespace NS {
///     struct X {};
///     void y(X);
///   }
///
///   void y(...);
///
///   void test() {
///     NS::X x;
///     y(x); // Matches
///     NS::y(x); // Doesn't match
///     y(42); // Doesn't match
///     using NS::y;
///     y(x); // Found by both unqualified lookup and ADL, doesn't match
///   }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }

/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
///   [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;

/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
///   X x;
///   x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
    cxxMemberCallExpr;

/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
///   [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
    objcMessageExpr;

/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
///   @interface Foo
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
    objcInterfaceDecl;

/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
///   @implementation Foo
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   ObjCImplementationDecl>
    objcImplementationDecl;

/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
///   @protocol FooDelegate
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
    objcProtocolDecl;

/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
///   @interface Foo (Additions)
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
    objcCategoryDecl;

/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
///   @implementation Foo (Additions)
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
    objcCategoryImplDecl;

/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
///   @interface Foo
///   - (void)method;
///   @end
///
///   @implementation Foo
///   - (void)method {}
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
    objcMethodDecl;
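
// An illustrative combination of the call matchers above: unqualified calls
// to a function named "y" that were resolved through ADL. The function name
// is taken from the usesADL() example and is purely illustrative:
// \code
//   auto AdlCallToY = callExpr(usesADL(),
//                              callee(functionDecl(hasName("y"))));
// \endcode

/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.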
///
/// \code
///   myFunc(^(int p) {
///     printf("%d", p);
///   })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl;

/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
///   @implementation Foo {
///     BOOL _enabled;
///   }
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
    objcIvarDecl;

/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
///   @interface Foo
///   @property BOOL enabled;
///   @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
    objcPropertyDecl;

/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
///   @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
    objcThrowStmt;

/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
///   @try {}
///   @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
    objcTryStmt;

/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
///   @try {}
///   @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
    objcCatchStmt;

/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
///   @try {}
///   @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
    objcFinallyStmt;

/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
///   const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
    exprWithCleanups;

/// Matches init list expressions.
///
/// Given
/// \code
///   int a[] = { 1, 2 };
///   struct B { int x, y; };
///   B b = { 5, 6 };
/// \endcode
/// initListExpr()
///   matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
    initListExpr;

/// Matches the syntactic form of init list expressions
/// (if expressions have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *SyntForm = Node.getSyntacticForm();
  return (SyntForm != nullptr &&
          InnerMatcher.matches(*SyntForm, Finder, Builder));
}

/// Matches C++ initializer list expressions.
///
/// Given
/// \code
///   std::vector<int> a({ 1, 2, 3 });
///   std::vector<int> b = { 4, 5 };
///   int c[] = { 6, 7 };
///   std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
///   matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXStdInitializerListExpr>
    cxxStdInitializerListExpr;

/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
///   matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
    implicitValueInitExpr;
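
// A small sketch combining the initializer-list matchers above; the binding
// id "init" is an illustrative name:
// \code
//   // Matches semantic init lists that carry a distinct syntactic form,
//   // binding the semantic node as "init".
//   auto SugaredInit = initListExpr(hasSyntacticForm(expr())).bind("init");
// \endcode

/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be encountered in template declarations.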
///
/// Given
/// \code
///   template<typename T> class X {
///     void f() {
///       X x(*this);
///       int a = 0, b = 1; int i = (a, b);
///     }
///   };
/// \endcode
/// parenListExpr() matches "*this" but does NOT match (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
    parenListExpr;

/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
///   template <int N>
///   struct A { static const int n = N; };
///   struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
///   matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   SubstNonTypeTemplateParmExpr>
    substNonTypeTemplateParmExpr;

/// Matches using declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using X::x;
/// \endcode
/// usingDecl()
///   matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;

/// Matches using namespace declarations.
///
/// Given
/// \code
///   namespace X { int x; }
///   using namespace X;
/// \endcode
/// usingDirectiveDecl()
///   matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
    usingDirectiveDecl;

/// Matches a reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
///   template<typename T>
///   T foo() { T a; return a; }
///   template<typename T>
///   void bar() {
///     foo<T>();
///   }
/// \endcode
/// unresolvedLookupExpr()
///   matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
    unresolvedLookupExpr;

/// Matches unresolved using value declarations.
///
/// Given
/// \code
///   template<typename X>
///   class C : private X {
///     using X::x;
///   };
/// \endcode
/// unresolvedUsingValueDecl()
///   matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   UnresolvedUsingValueDecl>
    unresolvedUsingValueDecl;

/// Matches unresolved using value declarations that involve the
/// \c typename keyword.
///
/// Given
/// \code
///   template <typename T>
///   struct Base { typedef T Foo; };
///
///   template<typename T>
///   struct S : private Base<T> {
///     using typename Base<T>::Foo;
///   };
/// \endcode
/// unresolvedUsingTypenameDecl()
///   matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   UnresolvedUsingTypenameDecl>
    unresolvedUsingTypenameDecl;

/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
///     (matcher = constantExpr())
/// \code
///   switch (a) {
///   case 37: break;
///   }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
    constantExpr;

/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
///   int foo() { return 1; }
///   int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;

/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
///     (matcher = cxxConstructExpr())
/// \code
///   void f(const string &a, const string &b);
///   char *ptr;
///   int n;
///   f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
    cxxConstructExpr;

/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in the return statement of f
///     (matcher = cxxUnresolvedConstructExpr())
/// \code
///   template <typename T>
///   T f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   CXXUnresolvedConstructExpr>
    cxxUnresolvedConstructExpr;

/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
///     (matcher = cxxThisExpr())
/// \code
///   struct foo {
///     int i;
///     int f() { return i; }
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
    cxxThisExpr;

/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
///     (matcher = cxxBindTemporaryExpr())
/// \code
///   FunctionTakesString(GetStringByValue());
///   FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
    cxxBindTemporaryExpr;

/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
///   struct T {void func();};
///   T f();
///   void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
///   T u(f());
///   g(f());
///   f().func();
/// \endcode
/// but does not match
/// \code
///   f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   MaterializeTemporaryExpr>
    materializeTemporaryExpr;

/// Matches new expressions.
///
/// Given
/// \code
///   new X;
/// \endcode
/// cxxNewExpr()
///   matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;

/// Matches delete expressions.
///
/// Given
/// \code
///   delete X;
/// \endcode
/// cxxDeleteExpr()
///   matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
    cxxDeleteExpr;

/// Matches array subscript expressions.
///
/// Given
/// \code
///   int i = a[1];
/// \endcode
/// arraySubscriptExpr()
///   matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
    arraySubscriptExpr;

/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
///     default value of the second parameter in the call expression f(42)
///     (matcher = cxxDefaultArgExpr())
/// \code
///   void f(int x, int y = 0);
///   f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
    cxxDefaultArgExpr;

/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match; use the
/// \c binaryOperator matcher instead.
/// Currently it does not match operators such as \c new and \c delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
///     (matcher = cxxOperatorCallExpr())
/// \code
///   ostream &operator<< (ostream &out, int i) { };
///   ostream &o; int b = 1, c = 1;
///   o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
    cxxOperatorCallExpr;

/// Matches expressions.
///
/// Example matches x()
/// \code
///   void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;

/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
///   bool x;
///   if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
    declRefExpr;
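
// An illustrative use of the matchers above: calls to an overloaded
// operator<<, such as 'o << b' in the cxxOperatorCallExpr() example:
// \code
//   auto ShiftCall = cxxOperatorCallExpr(hasOverloadedOperatorName("<<"));
// \endcode

/// Matches a reference to an ObjCIvar.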
///
/// Example: matches "a" in "init" method:
/// \code
///   @implementation A {
///     NSString *a;
///   }
///   - (void) init {
///     a = @"hello";
///   }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
    objcIvarRefExpr;

/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
///   void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;

/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
///   if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;

/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
///   for (;;) {}
///   int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;

/// Matches the increment statement of a for loop.
///
/// Example:
///     forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///     for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Increment = Node.getInc();
  return (Increment != nullptr &&
          InnerMatcher.matches(*Increment, Finder, Builder));
}

/// Matches the initialization statement of a for loop.
///
/// Example:
///     forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///     for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Init = Node.getInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
///   int i[] = {1, 2, 3}; for (auto a : i);
///   for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
    cxxForRangeStmt;

/// Matches the loop variable of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}

/// Matches the range initialization expression of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches while statements.
///
/// Given
/// \code
///   while (true) {}
/// \endcode
/// whileStmt()
///   matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;

/// Matches do statements.
///
/// Given
/// \code
///   do {} while (true);
/// \endcode
/// doStmt()
///   matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;

/// Matches break statements.
///
/// Given
/// \code
///   while (true) { break; }
/// \endcode
/// breakStmt()
///   matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
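
// A sketch combining the loop matchers above; the variable names follow the
// documentation examples and are illustrative:
// \code
//   // Classic counting loops such as 'for (int x = 0; x < N; ++x)'.
//   auto CountingLoop = forStmt(
//       hasLoopInit(declStmt()),
//       hasIncrement(unaryOperator(hasOperatorName("++"))));
//
//   // Range-based loops whose loop variable is named "x".
//   auto RangeLoopOverX =
//       cxxForRangeStmt(hasLoopVariable(varDecl(hasName("x"))));
// \endcode

/// Matches continue statements.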
///
/// Given
/// \code
///   while (true) { continue; }
/// \endcode
/// continueStmt()
///   matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
    continueStmt;

/// Matches return statements.
///
/// Given
/// \code
///   return 1;
/// \endcode
/// returnStmt()
///   matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;

/// Matches goto statements.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// gotoStmt()
///   matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;

/// Matches label statements.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// labelStmt()
///   matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;

/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
///   FOO: bar();
///   void *ptr = &&FOO;
///   goto *ptr;
/// \endcode
/// addrLabelExpr()
///   matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
    addrLabelExpr;

/// Matches switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
///   matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;

/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
///   matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;

/// Matches case statements inside switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
///   matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;

/// Matches default statements inside switch statements.
///
/// Given
/// \code
///   switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
///   matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
    defaultStmt;

/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
///   for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
    compoundStmt;

/// Matches catch statements.
///
/// \code
///   try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
///   matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
    cxxCatchStmt;

/// Matches try statements.
///
/// \code
///   try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
///   matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;

/// Matches throw expressions.
///
/// \code
///   try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
///   matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
    cxxThrowExpr;

/// Matches null statements.
///
/// \code
///   foo();;
/// \endcode
/// nullStmt()
///   matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;

/// Matches asm statements.
///
/// \code
///  int i = 100;
///   __asm("mov al, 2");
/// \endcode
/// asmStmt()
///   matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
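
// An illustrative combination of the statement matchers above: switch
// statements that contain a default case (hasDescendant is documented later
// in this header):
// \code
//   auto SwitchWithDefault = switchStmt(hasDescendant(defaultStmt()));
// \endcode

/// Matches bool literals.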
///
/// Example matches true
/// \code
///   true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
    cxxBoolLiteral;

/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
    stringLiteral;

/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is an IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
///   char ch = 'a';
///   wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
    characterLiteral;

/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
    integerLiteral;

/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
///   float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
    floatLiteral;

/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
    imaginaryLiteral;

/// Matches user-defined literal operator calls.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
    userDefinedLiteral;

/// Matches compound (i.e. non-scalar) literals.
///
/// Example match: {1}, (1, 2)
/// \code
///   int array[4] = {1};
///   vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
    compoundLiteralExpr;

/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
    cxxNullPtrLiteralExpr;

/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
    chooseExpr;

/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
    gnuNullExpr;

/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
///   void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;

/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
///   int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;

/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
///   !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
    binaryOperator;

/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
///   !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
    unaryOperator;

/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
///   (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
    conditionalOperator;

/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
///   (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   BinaryConditionalOperator>
    binaryConditionalOperator;

/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can be encountered
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
///   (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
    opaqueValueExpr;

/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
///   static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
///   struct S {
///     int x;
///   };
///   static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
    staticAssertDecl;

/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
///   void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
    cxxReinterpretCastExpr;

/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
///   cxxStaticCastExpr()
/// matches
///   static_cast<long>(8)
/// in
/// \code
///   long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
    cxxStaticCastExpr;

/// Matches a dynamic_cast expression.
///
/// Example:
///   cxxDynamicCastExpr()
/// matches
///   dynamic_cast<D*>(&b);
/// in
/// \code
///   struct B { virtual ~B() {} }; struct D : B {};
///   B b;
///   D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
    cxxDynamicCastExpr;

/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
///   int n = 42;
///   const int &r(n);
///   int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
    cxxConstCastExpr;

/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
///   int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
    cStyleCastExpr;

/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
///   int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
///   long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
    explicitCastExpr;

/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
    implicitCastExpr;
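
// A sketch distinguishing the cast matchers above. hasDestinationType() is
// the companion matcher referenced by the \see notes above and is assumed to
// be available elsewhere in this header:
// \code
//   // C-style casts to 'int', e.g. '(int) 2.2f'.
//   auto CStyleIntCast = cStyleCastExpr(hasDestinationType(asString("int")));
//
//   // Any user-written cast; implicit conversions are excluded.
//   auto UserCast = explicitCastExpr();
// \endcode

/// Matches any cast nodes of Clang's AST.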
///
/// Example: castExpr() matches each of the following:
/// \code
///   (int) 3;
///   const_cast<Expr *>(SubExpr);
///   char c = 0;
/// \endcode
/// but does not match
/// \code
///   int i = (0);
///   int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;

/// Matches functional cast expressions.
///
/// Example: Matches Foo(bar);
/// \code
///   Foo f = bar;
///   Foo g = (Foo) bar;
///   Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
    cxxFunctionalCastExpr;

/// Matches functional cast expressions having N != 1 arguments.
///
/// Example: Matches Foo(bar, bar)
/// \code
///   Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
    cxxTemporaryObjectExpr;

/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
///   printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
    predefinedExpr;

/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
    designatedInitExpr;

/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///   point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  return Node.size() == N;
}

/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;

/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;

/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;

/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
///   class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
///                        has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    eachOf;

/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    anyOf;

/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    allOf;

/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
///   Foo x = bar;
///   int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
///   matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   UnaryExprOrTypeTraitExpr>
    unaryExprOrTypeTraitExpr;

/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}

/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(
      unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
///  is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>,
                                        StringRef,
                                        internal::hasAnyNameFunc>
    hasAnyName;
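
// An illustrative use of the conveniences above:
// \code
//   // sizeof expressions whose argument type is 'int', e.g. sizeof(a) in
//   // the hasArgumentOfType() example.
//   auto SizeofInt = sizeOfExpr(hasArgumentOfType(asString("int")));
// \endcode

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.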
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  llvm::Regex RE(RegExp);
  return RE.match(FullNameString);
}

/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
}

/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
///   @interface NSObject @end
///   @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/false);
}

/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;

  const auto M = isDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  const auto M = anyOf(Base, isDerivedFrom(Base));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;

  const auto M = isSameOrDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is a direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;

/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
///     (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasDescendantMatcher>
    hasDescendant;

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
///   (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class Y { class X {}; };  // Matches Y, because Y::X is a class of name X
///                             // inside Y.
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
    forEach;
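
// A sketch contrasting the traversal matchers in this group; the binding id
// "x" is an illustrative name:
// \code
//   // At most one match per record: some direct child class named X.
//   auto DirectChild =
//       cxxRecordDecl(has(cxxRecordDecl(hasName("X")).bind("x")));
//
//   // One match per nested class named X, at any depth.
//   auto AnyDepth = cxxRecordDecl(
//       forEachDescendant(cxxRecordDecl(hasName("X")).bind("x")));
// \endcode

/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
///   (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class A { class X {}; };  // Matches A, because A::X is a class of name
///                             // X inside A.
///   class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.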
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
///   cxxRecordDecl(forEachDescendant(cxxRecordDecl(
///     forEachDescendant(cxxRecordDecl())
///   )))
/// will match 10 times (plus injected class name matches) on:
/// \code
///   class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::ForEachDescendantMatcher>
    forEachDescendant;

/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
///   class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(hasName("::A"),
///                 findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  return eachOf(Matcher, forEachDescendant(Matcher));
}

/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasParentMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasParent;

/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasAncestorMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasAncestor;

/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
///   class X {};
///   class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;

/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
///   class X {};
///   typedef X Y;
///   Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
///   varDecl(hasType(hasUnqualifiedDesugaredType(
///       recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
///   Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
///   Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
///   Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
///   Matcher<TagType>, Matcher<TemplateSpecializationType>,
///   Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
///   Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}

/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
///   namespace N { template<class T> void f(T t); }
///   template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
///     namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
///   matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl();

  return UnderlyingDecl != nullptr &&
         InnerMatcher.matches(*UnderlyingDecl, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y {};
///   void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
///   matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
///   matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument()
                            ->IgnoreParenImpCasts();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objcMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
///   NSString *webViewJavaScript = ...
///   UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  const QualType TypeDecl = Node.getReceiverType();
  return InnerMatcher.matches(TypeDecl, Finder, Builder);
}

/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  return Node.isClassMethod();
}
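
// An illustrative Objective-C sketch reusing the receiver-type example above:
// \code
//   // Message sends to a receiver of type UIWebView *.
//   auto WebViewMsg =
//       objcMessageExpr(hasReceiverType(asString("UIWebView *")));
// \endcode

/// Returns true when the Objective-C method declaration is an instance method.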
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  return Node.isInstanceMethod();
}

/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  return Node.isClassMessage();
}

/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// but not
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  return Node.isInstanceMessage();
}

/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *ReceiverNode = Node.getInstanceReceiver();
  return (ReceiverNode != nullptr &&
          InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder,
                               Builder));
}

/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objcMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  Selector Sel = Node.getSelector();
  return BaseName.compare(Sel.getAsString()) == 0;
}

/// Matches when at least one of the supplied strings equals
/// Selector.getAsString()
///
/// matcher = objcMessageExpr(hasAnySelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
///   [myObj methodA:argA];
///   [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;

/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objcMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  std::string SelectorString = Node.getSelector().getAsString();
  llvm::Regex RE(RegExp);
  return RE.match(SelectorString);
}

/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}

/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}

/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
///   UIWebView *webView = ...;
///   CGRect bodyFrame = webView.frame;
///   bodyFrame.size.height = self.bodyContentHeight;
///   webView.frame = bodyFrame;
///   //     ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  return Node.getSelector().isKeywordSelector();
}

/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  return Node.getSelector().getNumArgs() == N;
}

/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
///   class Y { void x() { this->x(); x(); Y y; y.x(); } };
///   void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
///   matches this->x(), x(), y.x(), f()
/// with callee(...)
///   matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking an
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Expr *ExprNode = Node.getCallee();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
///                                    cxxMethodDecl(hasName("x")))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}

/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and U (matcher = typedefDecl(hasType(asString("int"))))
///             and friend class X (matcher = friendDecl(hasType("X")))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   typedef int U;
///   class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return InnerMatcher.matches(QT, Finder, Builder);
  return false;
}
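// Illustrative usage sketch, not part of the matcher library: the
// Matcher<Decl> overload of callee() defined just above, which routes through
// hasDeclaration(). The helper name and the function name "malloc" are
// hypothetical choices for this example.
inline StatementMatcher mallocCallSketch() {
  // Matches direct calls to a function named "malloc".
  return callExpr(callee(functionDecl(hasName("malloc"))));
}

/// Overloaded to match the declaration of the expression's or value
/// declaration's type.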
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
///             and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
///             and friend class X (matcher = friendDecl(hasType("X")))
/// \code
///   class X {};
///   void y(X &x) { x; X z; }
///   class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  QualType QT = internal::getUnderlyingType(Node);
  if (!QT.isNull())
    return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
  return false;
}

/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
///   int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
///   matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  if (!Node.getTypeSourceInfo())
    // This happens for example for implicit destructors.
    return false;
  return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder,
                       Builder);
}

/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
///   class Y { public: void x(); };
///   void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
///   matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  return Name == Node.getAsString();
}

/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo(
///      cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  return (!Node.isNull() && Node->isAnyPointerType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
///   class A {};
///   using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}

/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
///     (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
///   class X {
///     void a(X b) {
///       X &x = b;
///       const X &y = b;
///     }
///   };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  return (!Node.isNull() && Node->isReferenceType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
///   typedef int &int_ref;
///   int a;
///   int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and `(g()).m()`, but not `x.g()`.
/// cxxMemberCallExpr(onImplicitObjectArgument(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
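// Illustrative usage sketch, not part of the matcher library: the
// thisPointerType() overload defined just above, mirroring its documented
// example. The helper name and class name "Y" are hypothetical.
inline StatementMatcher callThroughYSketch() {
  // Matches member calls made on an object, reference, or pointer whose
  // class declaration is named "Y", including calls on derived objects.
  return cxxMemberCallExpr(
      thisPointerType(hasDeclaration(cxxRecordDecl(hasName("Y")))));
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.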
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
///   template <typename T> void foo(T);
///   template <typename T> void bar(T);
///   template <typename T> void baz(T t) {
///     foo(t);
///     bar(t);
///   }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
///     functionTemplateDecl(hasName("foo"))))
///   matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}

/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
///   int a, b;
///   int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
///   matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  if (Node.isSingleDecl()) {
    const Decl *FoundDecl = Node.getSingleDecl();
    return InnerMatcher.matches(*FoundDecl, Finder, Builder);
  }
  return false;
}

/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
///   bool y() { return true; }
///   bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  const Expr *Initializer = Node.getAnyInitializer();
  return (Initializer != nullptr &&
          InnerMatcher.matches(*Initializer, Finder, Builder));
}

/// Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  return Node.isStaticLocal();
}

/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  return Node.hasLocalStorage();
}

/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  return Node.hasGlobalStorage();
}
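// Illustrative usage sketch, not part of the matcher library: combining
// hasLocalStorage() and hasInitializer(), both defined above, assuming the
// DeclarationMatcher alias from earlier in this header. The helper name is
// hypothetical.
inline DeclarationMatcher callInitializedLocalSketch() {
  // Matches e.g. `int x = f();` inside a function body, but not globals,
  // statics, or uninitialized locals.
  return varDecl(hasLocalStorage(), hasInitializer(callExpr()));
}

/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.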
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
///   void f() {
///     int x;
///     static int y;
///     thread_local int z;
///   }
///   int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  return Node.getStorageDuration() == SD_Automatic;
}

/// Matches a variable declaration that has static storage duration.
/// It includes the variables declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
///   void f() {
///     int x;
///     static int y;
///     thread_local int z;
///   }
///   int a;
///   static int b;
///   extern int c;
/// \endcode
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  return Node.getStorageDuration() == SD_Static;
}

/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
///   void f() {
///     int x;
///     static int y;
///     thread_local int z;
///   }
///   int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  return Node.getStorageDuration() == SD_Thread;
}

/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
///   void f(int y) {
///     try {
///     } catch (int x) {
///     }
///   }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  return Node.isExceptionVariable();
}

/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
///   void f(int x, int y);
///   f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr, ObjCMessageExpr),
                          unsigned, N) {
  return Node.getNumArgs() == N;
}

/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
///     (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
///   void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(
                               CallExpr, CXXConstructExpr, ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>,
                           InnerMatcher) {
  return (N < Node.getNumArgs() &&
          InnerMatcher.matches(
              *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder));
}

/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
///     (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}

/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
///   int a, b;
///   int c;
///   int d = 2, e;
/// \endcode
/// declCountIs(2)
///   matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
  std::advance(Iterator, N);
  return InnerMatcher.matches(**Iterator, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  return Node.getExceptionDecl() == nullptr;
}

/// Matches a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
///   hasAnyConstructorInitializer(anything())
/// )))
///   record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}

/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     forField(hasName("foo_"))))))
///   matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  const FieldDecl *NodeAsDecl = Node.getAnyMember();
  return (NodeAsDecl != nullptr &&
          InnerMatcher.matches(*NodeAsDecl, Finder, Builder));
}

/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     withInitializer(integerLiteral(equals(1)))))))
///   matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *NodeAsExpr = Node.getInit();
  return (NodeAsExpr != nullptr &&
          InnerMatcher.matches(*NodeAsExpr, Finder, Builder));
}

/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
///   struct Foo {
///     Foo() { }
///     Foo(int) : foo_("A") { }
///     string foo_;
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
///   will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  return Node.isWritten();
}
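// Illustrative usage sketch, not part of the matcher library: the
// constructor-initializer matchers above composed with allOf(). The helper
// name and the member name "foo_" are hypothetical choices for this example.
inline DeclarationMatcher initializesFooSketch() {
  // Matches constructors whose initializer list explicitly initializes a
  // field named "foo_"; compiler-synthesized initializers are excluded by
  // isWritten().
  return cxxConstructorDecl(hasAnyConstructorInitializer(
      allOf(isWritten(), forField(hasName("foo_")))));
}

/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
///   will match E(), but not match D(int).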
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  return Node.isBaseInitializer();
}

/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
///   will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  return Node.isMemberInitializer();
}

/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
///   void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
///   matches x(1, y, 42)
/// with hasAnyArgument(...)
///   matching y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
///   void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
///   matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}

/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  return Node.isListInitialization();
}

/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
///   void foo() {
///     struct point { double x; double y; };
///     point pt[2] = { { 1.0, 2.0 } };
///   }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())))
///   will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}

/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
///   matching int y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasAnyParameter
/// matching y.
///
/// For blocks, given
/// \code
///   b = ^(int y) { printf("%d", y); };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasAnyParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}

/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
///   void f(int i) {}
///   void g(int i, int j) {}
///   void h(int i, int j);
///   void j(int i);
///   void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
///   matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  return Node.getNumParams() == N;
}

/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
///   void nope();
///   [[noreturn]] void a();
///   __attribute__((noreturn)) void b();
///   struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
///   matches all of those except
/// \code
///   void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
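// Illustrative usage sketch, not part of the matcher library:
// forEachArgumentWithParam(), defined above, pairing every call argument
// with its parameter; both nodes are bound for use in a match callback. The
// helper name and the binding ids are hypothetical.
inline StatementMatcher argParamPairsSketch() {
  // Produces one match per (argument, parameter) pair of each call.
  return callExpr(forEachArgumentWithParam(expr().bind("arg"),
                                           parmVarDecl().bind("param")));
}

/// Matches the return type of a function declaration.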
///
/// Given:
/// \code
///   class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
///   matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}

/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
///   extern "C" void f() {}
///   extern "C" { void g() {} }
///   void h() {}
///   extern "C" int x = 1;
///   extern "C" int y = 2;
///   int z = 3;
/// \endcode
/// functionDecl(isExternC())
///   matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
///   matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  return Node.isExternC();
}

/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
///   static void f() {}
///   static int i = 0;
///   extern int j;
///   int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
///   matches the function declaration f.
/// varDecl(isStaticStorageClass())
///   matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  return Node.getStorageClass() == SC_Static;
}

/// Matches deleted function declarations.
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  return Node.isDeleted();
}

/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  return Node.isDefaulted();
}

/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g and h, but not f, i, or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would be if the function did not have any
  // exception specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
///   if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Condition = Node.getCond();
  return (Condition != nullptr &&
          InnerMatcher.matches(*Condition, Finder, Builder));
}

/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Then = Node.getThen();
  return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}

/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Else = Node.getElse();
  return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder));
}

/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
///   class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
///     has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
///     has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
///   matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNode acts as a filter.
/// For example:
/// compoundStmt(
///     forEachDescendant(varDecl().bind("d")),
///     forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = ast_type_traits::DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
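// Illustrative usage sketch, not part of the matcher library: it mirrors the
// documented equalsBoundNode() example above as a reusable helper. The helper
// name and the field names "a" and "b" are hypothetical.
inline DeclarationMatcher sameTypeFieldsSketch() {
  // Matches records whose fields "a" and "b" have the same type: the first
  // submatcher binds the type, the second filters on the binding.
  return cxxRecordDecl(
      has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
      has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))));
}

/// Matches the condition variable statement in an if statement.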
///
/// Given
/// \code
///   if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
///   matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  const DeclStmt* const DeclarationStatement =
    Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}

/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getIdx())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr* Expression = Node.getBase())
    return InnerMatcher.matches(*Expression, Finder, Builder);
  return false;
}

/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}

/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *CS = internal::CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder, Builder);
}

/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  return Node.size() == N;
}

/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher, ValueT>(Value);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       FloatingLiteral, IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  return Name == Node.getOpcodeStr(Node.getOpcode());
}

/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
///            (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
///   struct S { S& operator=(const S&); };
///   void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  return Node.isAssignmentOp();
}

/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *LeftHandSide = Node.getLHS();
  return (LeftHandSide != nullptr &&
          InnerMatcher.matches(*LeftHandSide, Finder, Builder));
}
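// Illustrative usage sketch, not part of the matcher library: equals(),
// hasOperatorName(), and hasLHS(), all defined above, composed into one
// matcher. The helper name and the literal 42 are hypothetical.
inline StatementMatcher fortyTwoPlusSketch() {
  // Matches e.g. `42 + x` but not `x + 42`.
  return binaryOperator(hasOperatorName("+"),
                        hasLHS(integerLiteral(equals(42))));
}

/// Matches the right hand side of binary operator expressions.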
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *RightHandSide = Node.getRHS();
  return (RightHandSide != nullptr &&
          InnerMatcher.matches(*RightHandSide, Finder, Builder));
}

/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}

/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr * const Operand = Node.getSubExpr();
  return (Operand != nullptr &&
          InnerMatcher.matches(*Operand, Finder, Builder));
}

/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))))
/// \code
///   int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const SubExpression =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  return (SubExpression != nullptr &&
          InnerMatcher.matches(*SubExpression, Finder, Builder));
}

/// Matches casts that have a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
///   int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  return Node.getCastKind() == Kind;
}

/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}

/// Matches RecordDecl objects that are spelled with "struct".
///
/// Example matches S, but not C or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isStruct) {
  return Node.isStruct();
}

/// Matches RecordDecl objects that are spelled with "union".
///
/// Example matches U, but not C or S.
/// \code
///   struct S {};
///   class C {};
///   union U {};
/// \endcode
AST_MATCHER(RecordDecl, isUnion) {
  return Node.isUnion();
}

/// Matches RecordDecl objects that are spelled with "class".
///
/// Example matches C, but not S or U.
/// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isClass) { return Node.isClass(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. 
/// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches if the given method declaration is virtual. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. /// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. 
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}

/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it
/// matches use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}

/// Matches template instantiations of function, class, or static
/// member variable templates.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T> class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation ||
          Node.getTemplateSpecializationKind() ==
              TSK_ExplicitInstantiationDefinition ||
          Node.getTemplateSpecializationKind() ==
              TSK_ExplicitInstantiationDeclaration);
}

/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}

/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42;}
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned int i;'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition
///   and instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
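// Illustrative usage sketch, not part of the matcher library: it mirrors the
// documented isTemplateInstantiation() example above as a reusable helper.
// The helper name and the record name "::X" are hypothetical.
inline DeclarationMatcher xInstantiationSketch() {
  // Matches implicit and explicit instantiations of X, but not explicit
  // specializations such as `template <> class X<A> {};`.
  return cxxRecordDecl(hasName("::X"), isTemplateInstantiation());
}

/// Matches explicit template specializations of function, class, or
/// static member variable templates.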
/// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. 
/// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. 
/// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. 
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;

/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
///   int *a;
///   int const *b;
///   float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
///   matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
///   Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));

/// Matches typedef types.
///
/// Given
/// \code
///   typedef int X;
/// \endcode
/// typedefType()
///   matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;

/// Matches enum types.
///
/// Given
/// \code
///   enum C { Green };
///   enum class S { Red };
///
///   C c;
///   S s;
/// \endcode
//
/// \c enumType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<EnumType> enumType;

/// Matches template specialization types.
///
/// Given
/// \code
///   template <typename T>
///   class C { };
///
///   template class C<int>;  // A
///   C<char> var;            // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
    templateSpecializationType;

/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
///   typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
///   matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;

/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
///   class C {};
///   struct S {};
///
///   C c;
///   S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;

/// Matches tag types (record and enum types).
///
/// Given
/// \code
///   enum E {};
///   class C {};
///
///   E e;
///   C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;

/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   class C {};
///
///   class C c;
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;

/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))))
/// matches the type of the variable declaration of \c d.
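///
/// As a minimal illustrative sketch (not from the official docs; the
/// binding name "elab" is an arbitrary choice for this example), the
/// matcher composes with \c bind() so a callback can retrieve the node:
/// \code
///   varDecl(hasType(elaboratedType(hasQualifier(
///       hasPrefix(specifiesNamespace(hasName("N"))))).bind("elab")))
/// \endcode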
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);

  return false;
}

/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}

/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
///   template <typename T>
///   void F(T t) {
///     int i = 1 + t;
///   }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
    substTemplateTypeParmType;

/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
///   template <typename T>
///   double F(T t);
///   int i;
///   double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
    hasReplacementType, getReplacementType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));

/// Matches template type parameter types.
///
/// Example matches T, but not int.
///     (matcher = templateTypeParmType())
/// \code
///   template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;

/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
///     (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
///   template <typename T> struct S {
///     void f(S s);
///     void g(S<T> s);
///   };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;

/// Matches decayed type.
/// Example matches i[] in declaration of f.
///     (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
///     (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
///   void f(int i[]) {
///     i[1] = 0;
///   }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;

/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}

/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC),
                              Finder, Builder);
}

/// Matches nested name specifiers.
/// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. 
/// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. 
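///
/// As with other narrowing matchers, this composes freely; a minimal
/// illustrative sketch (not from the official docs) that skips
/// compiler-generated default constructors:
/// \code
///   cxxConstructorDecl(isDefaultConstructor(), unless(isImplicit()))
/// \endcode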
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. 
/// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
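///
/// As a minimal illustrative sketch (not from the official docs; the
/// binding name "cmp" is an arbitrary choice), the matcher combines
/// with other expression matchers to find comparisons against null:
/// \code
///   binaryOperator(hasOperatorName("=="),
///                  hasEitherOperand(ignoringParenImpCasts(
///                      nullPointerConstant()))).bind("cmp")
/// \endcode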
AST_MATCHER(Expr, nullPointerConstant) {
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}

/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
///   std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///   return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
                                                            Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument.  For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
///   matches the parameter of y
///
/// A matcher such as
///   parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  return Node.hasDefaultArg();
}

/// Matches array new expressions.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
///   matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  return Node.isArray();
}

/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
///   matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}

/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  return Node.hasDefinition();
}

/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  return Node.isScoped();
}

/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
    return F->hasTrailingReturn();
  return false;
}

/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
///   H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  return InnerMatcher.matches(Node, Finder, Builder);
}

//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//

/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
    ompExecutableDirective;

/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   {}
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective())`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  return Node.isStandaloneDirective();
}

/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock())`` matches ``{}``.
AST_MATCHER(Stmt, isOMPStructuredBlock) {
  return Node.isOMPStructuredBlock();
}

/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   ;
///   #pragma omp parallel
///   {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  if (Node.isStandaloneDirective())
    return false; // Standalone directives have no structured blocks.
  return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}

/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder, Builder);
}

/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
    ompDefaultClause;

/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  return Node.getDefaultKind() == OMPC_DEFAULT_none;
}

/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  return Node.getDefaultKind() == OMPC_DEFAULT_shared;
}

/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel for
///   #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default")``.
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  return isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}

//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//

} // namespace ast_matchers
} // namespace clang

#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
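/// A minimal end-to-end sketch of how the matchers above are typically
/// driven (illustrative only, not part of this header; "ctor" is an
/// arbitrary binding name, and MatchFinder/MatchCallback come from the
/// clang::ast_matchers API):
/// \code
///   using namespace clang;
///   using namespace clang::ast_matchers;
///
///   class CopyCtorPrinter : public MatchFinder::MatchCallback {
///   public:
///     void run(const MatchFinder::MatchResult &Result) override {
///       // Retrieve the node bound to "ctor" below, if any.
///       if (const auto *Ctor =
///               Result.Nodes.getNodeAs<CXXConstructorDecl>("ctor"))
///         Ctor->dump();
///     }
///   };
///
///   MatchFinder Finder;
///   CopyCtorPrinter Printer;
///   // Report every copy constructor to the callback above.
///   Finder.addMatcher(cxxConstructorDecl(isCopyConstructor()).bind("ctor"),
///                     &Printer);
/// \endcode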
munit.c
/* Copyright (c) 2013-2017 Evan Nemerson <evan@nemerson.com>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*** Configuration ***/

/* This is just where the output from the test goes.  It's really just
 * meant to let you choose stdout or stderr, but if anyone really
 * wants to direct it to a file let me know; it would be fairly easy to
 * support. */
#if !defined(MUNIT_OUTPUT_FILE)
#  define MUNIT_OUTPUT_FILE stdout
#endif

/* This is a bit more useful; it tells µnit how to format the seconds in
 * timed tests.  If your tests run for longer you might want to reduce
 * it, and if your computer is really fast and your tests are tiny you
 * can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
#  define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif

/* If you have long test names you might want to consider bumping
 * this.  The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
#  define MUNIT_TEST_NAME_LEN 37
#endif

/* If you don't like the timing information, you can disable it by
 * defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
#  define MUNIT_ENABLE_TIMING
#endif

/*** End configuration ***/

#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
#  undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
#  define _POSIX_C_SOURCE 200809L
#endif

/* Solaris freaks out if you try to use a POSIX or SUS standard without
 * the "right" C standard. */
#if defined(_XOPEN_SOURCE)
#  undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
#  if __STDC_VERSION__ >= 201112L
#    define _XOPEN_SOURCE 700
#  elif __STDC_VERSION__ >= 199901L
#    define _XOPEN_SOURCE 600
#  endif
#endif

/* Because, according to Microsoft, POSIX is deprecated.  You've got
 * to appreciate the chutzpah.
*/
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
#  define _CRT_NONSTDC_NO_DEPRECATE
#endif

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
#  include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif

#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>

#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif

#if !defined(_WIN32)
#  include <unistd.h>
#  include <sys/types.h>
#  include <sys/wait.h>
#else
#  include <windows.h>
#  include <io.h>
#  include <fcntl.h>
#  if !defined(STDERR_FILENO)
#    define STDERR_FILENO _fileno(stderr)
#  endif
#endif

#include "munit.h"

#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)

#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
#  define MUNIT_THREAD_LOCAL __thread
#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
#  define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(_WIN32)
#  define MUNIT_THREAD_LOCAL __declspec(thread)
#endif

/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
 * while (0)', or 'do { ... } while (true)'.  I'm pretty sure nobody
 * at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif

#if defined(_WIN32) || defined(__EMSCRIPTEN__)
#  define MUNIT_NO_FORK
#endif

#if defined(__EMSCRIPTEN__)
#  define MUNIT_NO_BUFFER
#endif

/*** Logging ***/

static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;

#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL bool munit_error_jmp_buf_valid = false;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif

/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicitly *not* set
 * because it will then choke on our attempts to use the MS-specific
 * I64 modifier for size_t (which we have to use since MSVC doesn't
 * support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  if (level < munit_log_level_visible)
    return;

  switch (level) {
    case MUNIT_LOG_DEBUG:
      fputs("Debug", fp);
      break;
    case MUNIT_LOG_INFO:
      fputs("Info", fp);
      break;
    case MUNIT_LOG_WARNING:
      fputs("Warning", fp);
      break;
    case MUNIT_LOG_ERROR:
      fputs("Error", fp);
      break;
    default:
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}

MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, fp, NULL, 0, format, ap);
  va_end(ap);
}

static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}

void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...)
{
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}

void
munit_errorf_ex(const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
#  define MUNIT_STRERROR_LEN 80
#endif

static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr;

  if (size == 0)
    return NULL;

  ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL)) {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}

/*** Timer code ***/

#if defined(MUNIT_ENABLE_TIMING)

#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t

/* Code copied from portable-snippets
 * <https://github.com/nemequ/portable-snippets/>.  If you need to
 * change something, please do it there so we can keep the code in
 * sync. */

/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
 * Created by Evan Nemerson <evan@nemerson.com>
 *
 * To the extent possible under law, the authors have waived all
 * copyright and related or neighboring rights to this code.  For
 * details, see the Creative Commons Zero 1.0 Universal license at
 * https://creativecommons.org/publicdomain/zero/1.0/
 */

#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H

#if !defined(psnip_uint64_t)
#  include "../exact-int/exact-int.h"
#endif

#if !defined(PSNIP_CLOCK_STATIC_INLINE)
#  if defined(__GNUC__)
#    define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
#  else
#    define PSNIP_CLOCK__COMPILER_ATTRIBUTES
#  endif

#  define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif

enum PsnipClockType {
  /* This clock provides the current time, in units since 1970-01-01
   * 00:00:00 UTC not including leap seconds.  In other words, UNIX
   * time.  Keep in mind that this clock doesn't account for leap
   * seconds, and can go backwards (think NTP adjustments). */
  PSNIP_CLOCK_TYPE_WALL = 1,
  /* The CPU time is a clock which increases only when the current
   * process is active (i.e., it doesn't increment while blocking on
   * I/O). */
  PSNIP_CLOCK_TYPE_CPU = 2,
  /* Monotonic time is always running (unlike CPU time), but it only
   * ever moves forward unless you reboot the system.  Things like NTP
   * adjustments have no effect on this clock.
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) # define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else # define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* We want to be able to detect the libc implementation, so we include <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) # include <limits.h> # include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* These are known to work without librt. If you know of others * please let us know so we can add them. */ # if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # elif !defined(PSNIP_CLOCK_NO_LIBRT) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # endif #endif #if defined(_WIN32) # if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER # endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME # endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) # include <time.h> # if !defined(PSNIP_CLOCK_WALL_METHOD) # if defined(CLOCK_REALTIME_PRECISE) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE # elif !defined(__sun) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME # endif # endif # if !defined(PSNIP_CLOCK_CPU_METHOD) # if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID # elif defined(CLOCK_VIRTUAL) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL # endif # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # if defined(CLOCK_MONOTONIC_RAW) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # elif defined(CLOCK_MONOTONIC_PRECISE) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE # elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # endif # endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) # if !defined(PSNIP_CLOCK_WALL_METHOD) # define 
PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY # endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) # error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) # include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) # include <sys/time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) # include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) # include <sys/time.h> # include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) # include <CoreServices/CoreServices.h> # include <mach/mach.h> # include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ 
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres (clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart 
-= adjust.QuadPart;

  res->seconds = date.QuadPart / 10000000;
  res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0)
    return -8;

  res->seconds = usage.ru_utime.tv_sec;
  /* fixed: the microseconds come from the same rusage field as the
   * seconds; `tv` was never declared in this branch */
  res->nanoseconds = usage.ru_utime.tv_usec * 1000;
#else
  (void) res;
  return -2;
#endif

  return 0;
}

PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_monotonic_get_precision (void) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  return 0;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  return (psnip_uint32_t) (tbi.numer / tbi.denom);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  return 1000;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER Frequency;
  QueryPerformanceFrequency(&Frequency);
  return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart);
#else
  return 0;
#endif
}

PSNIP_CLOCK__FUNCTION int
psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  psnip_uint64_t nsec = mach_absolute_time();
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  /* multiply before dividing so a non-integral numer/denom ratio is
   * not truncated to an integer scale factor */
  nsec = (nsec * ((psnip_uint64_t) tbi.numer)) / ((psnip_uint64_t) tbi.denom);
  res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC;
  res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER t, f;
  if (QueryPerformanceCounter(&t) == 0)
    return -12;
  QueryPerformanceFrequency(&f);
  res->seconds = t.QuadPart / f.QuadPart;
  res->nanoseconds = t.QuadPart % f.QuadPart;
  if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC)
    res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC;
  else
    res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  const ULONGLONG msec = GetTickCount64();
  res->seconds = msec / 1000;
  /* fixed: `sec` was undeclared; the sub-second remainder is in
   * milliseconds and must be scaled to nanoseconds */
  res->nanoseconds = (msec % 1000) * (PSNIP_CLOCK_NSEC_PER_SEC / 1000);
#else
  (void) res;
  return -2;
#endif

  return 0;
}

/* Returns the number of ticks per second for the specified clock.
 * For example, a clock with millisecond precision would return 1000,
 * and a clock with 1 second precision (such as the time() function)
 * would return 1.
 *
 * If the requested clock isn't available, it will return 0.
 * Hopefully this will be rare, but if it happens to you please let us
 * know so we can work on finding a way to support your system.
 *
 * Note that different clocks on the same system often have
 * different precisions.
 */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_get_precision (enum PsnipClockType clock_type) {
  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_precision ();
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_precision ();
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_precision ();
  }

  PSNIP_CLOCK_UNREACHABLE();
  return 0;
}

/* Set the provided timespec to the requested time.  Returns 0 on
 * success, or a negative value on failure. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time (res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time (res);
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time (res);
  }

  return -1;
}

#endif /* !defined(PSNIP_CLOCK_H) */

static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;
  if (end->nanoseconds < start->nanoseconds) {
    r -= (start->nanoseconds - end->nanoseconds);
  } else {
    r += (end->nanoseconds - start->nanoseconds);
  }
  return r;
}

#endif /* defined(MUNIT_ENABLE_TIMING) */

/*** PRNG stuff ***/

/* This is (unless I screwed up, which is entirely possible) the
 * version of PCG with 32-bit state.  It was chosen because it has a
 * small enough state that we should reliably be able to use CAS
 * instead of requiring a lock for thread-safety.
 *
 * If I did screw up, I probably will not bother changing it unless
 * there is a significant bias.  It's not important that this be
 * particularly strong; as long as it is fairly random, it is much
 * more important that it be reproducible, so that bug reports have
 * a better chance of being reproducible.
*/ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8)) # define HAVE_STDATOMIC #elif defined(__clang__) # if __has_extension(c_atomic) # define HAVE_CLANG_ATOMICS # endif #endif /* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */ #if defined(__clang__) && defined(_WIN32) # undef HAVE_STDATOMIC # if defined(__c2__) # undef HAVE_CLANG_ATOMICS # endif #endif #if defined(_OPENMP) # define ATOMIC_UINT32_T uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(HAVE_STDATOMIC) # include <stdatomic.h> # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x) #elif defined(HAVE_CLANG_ATOMICS) # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(_WIN32) # define ATOMIC_UINT32_T volatile LONG # define ATOMIC_UINT32_INIT(x) (x) #else # define ATOMIC_UINT32_T volatile uint32_t # define ATOMIC_UINT32_INIT(x) (x) #endif static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42); #if defined(_OPENMP) static inline void munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) { #pragma omp critical (munit_atomics) *dest = value; } static inline uint32_t munit_atomic_load(ATOMIC_UINT32_T* src) { int ret; #pragma omp critical (munit_atomics) ret = *src; return ret; } static inline uint32_t munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { bool ret; #pragma omp critical (munit_atomics) { if (*dest == *expected) { *dest = desired; ret = true; } else { ret = false; } } return ret; } #elif defined(HAVE_STDATOMIC) # define munit_atomic_store(dest, value) atomic_store(dest, value) # define munit_atomic_load(src) atomic_load(src) # define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value) #elif defined(HAVE_CLANG_ATOMICS) # define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) # define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ >= 4) # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value) #elif defined(_WIN32) /* Untested */ # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected)) #else # warning No atomic implementation, PRNG will not be thread-safe # define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) static inline bool munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { if (*dest == *expected) { *dest = desired; return true; } else { 
return false; } } #endif #define MUNIT_PRNG_MULTIPLIER (747796405U) #define MUNIT_PRNG_INCREMENT (1729U) static munit_uint32_t munit_rand_next_state(munit_uint32_t state) { return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT; } static munit_uint32_t munit_rand_from_state(munit_uint32_t state) { munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U); res ^= res >> 22; return res; } void munit_rand_seed(munit_uint32_t seed) { munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); munit_atomic_store(&munit_rand_state, state); } static munit_uint32_t munit_rand_generate_seed(void) { struct PsnipClockTimespec wc = { 0, 0 }; munit_uint32_t seed, state; psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc); seed = (munit_uint32_t) wc.nanoseconds; state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); return munit_rand_from_state(state); } static munit_uint32_t munit_rand_state_uint32(munit_uint32_t* state) { const munit_uint32_t old = *state; *state = munit_rand_next_state(old); return munit_rand_from_state(old); } munit_uint32_t munit_rand_uint32(void) { munit_uint32_t old, state; do { old = munit_atomic_load(&munit_rand_state); state = munit_rand_next_state(old); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return munit_rand_from_state(old); } static void munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { size_t members_remaining = size / sizeof(munit_uint32_t); size_t bytes_remaining = size % sizeof(munit_uint32_t); munit_uint8_t* b = data; munit_uint32_t rv; while (members_remaining-- > 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, sizeof(munit_uint32_t)); b += sizeof(munit_uint32_t); } if (bytes_remaining != 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, bytes_remaining); } } void munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { munit_uint32_t old, state; do { state = old = munit_atomic_load(&munit_rand_state); munit_rand_state_memory(&state, size, data); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); } static munit_uint32_t munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) { /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same * as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max using not * to avoid compiler warnings. */ const munit_uint32_t min = (~max + 1U) % max; munit_uint32_t x; if (max == (~((munit_uint32_t) 0U))) return munit_rand_state_uint32(state) ^ salt; max++; do { x = munit_rand_state_uint32(state) ^ salt; } while (x < min); return x % max; } static munit_uint32_t munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) { munit_uint32_t old, state; munit_uint32_t retval; do { state = old = munit_atomic_load(&munit_rand_state); retval = munit_rand_state_at_most(&state, salt, max); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } int munit_rand_int_range(int min, int max) { munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min; if (min > max) return munit_rand_int_range(max, min); if (range > (~((munit_uint32_t) 0U))) range = (~((munit_uint32_t) 0U)); return min + munit_rand_at_most(0, (munit_uint32_t) range); } double munit_rand_double(void) { munit_uint32_t old, state; double retval = 0.0; do { state = old = munit_atomic_load(&munit_rand_state); /* See http://mumble.net/~campbell/tmp/random_real.c for how to do * this right. Patches welcome if you feel that this is too * biased. 
     */
    retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

/*** Test suite handling ***/

typedef struct {
  unsigned int successful;
  unsigned int skipped;
  unsigned int failed;
  unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
  munit_uint64_t cpu_clock;
  munit_uint64_t wall_clock;
#endif
} MunitReport;

typedef struct {
  const char* prefix;
  const MunitSuite* suite;
  const char** tests;
  munit_uint32_t seed;
  unsigned int iterations;
  MunitParameter* parameters;
  bool single_parameter_mode;
  void* user_data;
  MunitReport report;
  bool colorize;
  bool fork;
  bool show_stderr;
  bool fatal_failures;
} MunitTestRunner;

const char*
munit_parameters_get(const MunitParameter params[], const char* key) {
  const MunitParameter* param;

  for (param = params ; param != NULL && param->name != NULL ; param++)
    if (strcmp(param->name, key) == 0)
      return param->value;
  return NULL;
}

static void
munit_print_time(FILE* fp, munit_uint64_t nanoseconds) {
  fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC));
}

/* Add a parameter to an array of parameters. */
static MunitResult
munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) {
  *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
  if (*params == NULL)
    return MUNIT_ERROR;

  (*params)[*params_size].name = name;
  (*params)[*params_size].value = value;
  (*params_size)++;
  (*params)[*params_size].name = NULL;
  (*params)[*params_size].value = NULL;

  return MUNIT_OK;
}

/* Concatenate two strings, but just return one of the components
 * unaltered if the other is NULL or "". */
static char*
munit_maybe_concat(size_t* len, char* prefix, char* suffix) {
  char* res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;
  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    memcpy(res, prefix, prefix_l);
    memcpy(res + prefix_l, suffix, suffix_l);
    res[res_l] = 0;
  }

  if (len != NULL)
    *len = res_l;

  return res;
}

/* Possibly free a string returned by munit_maybe_concat. */
static void
munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) {
  if (prefix != s && suffix != s)
    free(s);
}

/* Cheap string hash function, just used to salt the PRNG.
*/ static munit_uint32_t munit_str_hash(const char* name) { const char *p; munit_uint32_t h = 5381U; for (p = name; *p != '\0'; p++) h = (h << 5) + h + *p; return h; } static void munit_splice(int from, int to) { munit_uint8_t buf[1024]; #if !defined(_WIN32) ssize_t len; ssize_t bytes_written; ssize_t write_res; #else int len; int bytes_written; int write_res; #endif do { len = read(from, buf, sizeof(buf)); if (len > 0) { bytes_written = 0; do { write_res = write(to, buf + bytes_written, len - bytes_written); if (write_res < 0) break; bytes_written += write_res; } while (bytes_written < len); } else break; } while (true); } /* This is the part that should be handled in the child process */ static MunitResult munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) { unsigned int iterations = runner->iterations; MunitResult result = MUNIT_FAIL; #if defined(MUNIT_ENABLE_TIMING) struct PsnipClockTimespec wall_clock_begin = { 0, 0 }, wall_clock_end = { 0, 0 }; struct PsnipClockTimespec cpu_clock_begin = { 0, 0 }, cpu_clock_end = { 0, 0 }; #endif unsigned int i = 0; if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION) iterations = 1; else if (iterations == 0) iterations = runner->suite->iterations; munit_rand_seed(runner->seed); do { void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int) result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) # define MUNIT_RESULT_STRING_OK ":)" # define MUNIT_RESULT_STRING_SKIP ":|" # define MUNIT_RESULT_STRING_FAIL ":(" # define MUNIT_RESULT_STRING_ERROR ":o" # define MUNIT_RESULT_STRING_TODO ":/" #else # define MUNIT_RESULT_STRING_OK "OK " # define MUNIT_RESULT_STRING_SKIP "SKIP " # define MUNIT_RESULT_STRING_FAIL "FAIL " # define MUNIT_RESULT_STRING_ERROR "ERROR" # define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE* stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd == -1)) { exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); return orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the 
specified parameters. */ static void munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; bool first; const MunitParameter* param; FILE* stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = true; for (param = params ; param != NULL && param->name != NULL ; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = false; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* Note that we don't restore stderr. This is so we can buffer * things written to stderr later on (such as by * asan/tsan/ubsan, valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", 
WSTOPSIG(status)); } report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if !defined(MUNIT_NO_BUFFER) orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = true; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* Here just so that the label is used on Windows and we don't get * a warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner* runner, const MunitTest* test, const char* test_name, MunitParameter* params, MunitParameter* p) { const MunitParameterEnum* pe; char** values; MunitParameter* next; for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values ; *values != NULL ; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, 
                                             test, params);
    } else {
      munit_test_runner_run_test_wild(runner, test, test_name, params, next);
    }
    if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
      break;
  }
}

/* Run a single test, with every combination of parameters
 * requested. */
static void
munit_test_runner_run_test(MunitTestRunner* runner,
                           const MunitTest* test,
                           const char* prefix) {
  char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter* params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI.  That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter* wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum* pe;
  const MunitParameter* cli_p;
  bool filled;
  unsigned int possible;
  char** vals;
  size_t first_wild;
  const MunitParameter* wp;
  int pidx;

  munit_rand_seed(runner->seed);

  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);

  if (test->parameters == NULL) {
    /* No parameters.  Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);

    for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = false;
      for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = true;
          break;
        }
      }
      if (filled)
        continue;

      /* Nothing from the CLI; is the enum NULL/empty?  We're not a
       * fuzzer… */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;

      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly. */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values ; *vals != NULL ; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation.  Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }

    if (wild_params_l != 0) {
      first_wild = params_l;
      for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) {
        for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }

      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }

 cleanup:
    free(params);
    free(wild_params);
  }

  munit_maybe_free_concat(test_name, prefix, test->name);
}

/* Recurse through the suite and run all the tests.
If a list of
 * tests to run was provided on the command line, run only those
 * tests. */
static void
munit_test_runner_run_suite(MunitTestRunner* runner,
                            const MunitSuite* suite,
                            const char* prefix) {
  size_t pre_l;
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const char** test_name;
  const MunitSuite* child_suite;

  /* Run the tests. */
  for (test = suite->tests ; test != NULL && test->test != NULL ; test++) {
    if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
      for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) {
        if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) &&
            strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
          munit_test_runner_run_test(runner, test, pre);
          if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
            goto cleanup;
        }
      }
    } else { /* Run all tests */
      munit_test_runner_run_test(runner, test, pre);
    }
  }

  if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
    goto cleanup;

  /* Run any child suites. */
  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_test_runner_run_suite(runner, child_suite, pre);
  }

 cleanup:
  munit_maybe_free_concat(pre, prefix, suite->prefix);
}

static void
munit_test_runner_run(MunitTestRunner* runner) {
  munit_test_runner_run_suite(runner, runner->suite, NULL);
}

static void
munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) {
  const MunitArgument* arg;

  (void) argc;

  printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
  puts(" --seed SEED\n"
       "           Value used to seed the PRNG.  Must be a 32-bit integer in decimal\n"
       "           notation with no separators (commas, decimals, spaces, etc.), or\n"
       "           hexadecimal prefixed by \"0x\".\n"
       " --iterations N\n"
       "           Run each test N times.  0 means the default number.\n"
       " --param name value\n"
       "           A parameter key/value pair which will be passed to any test that\n"
       "           takes a parameter of that name.  If not provided, the test will be\n"
       "           run once for each possible parameter value.\n"
       " --list    Write a list of all available tests.\n"
       " --list-params\n"
       "           Write a list of all available tests and their possible parameters.\n"
       " --single  Run each parameterized test in a single configuration instead of\n"
       "           every possible combination.\n"
       " --log-visible debug|info|warning|error\n"
       " --log-fatal debug|info|warning|error\n"
       "           Set the level at which messages of different severities are visible,\n"
       "           or cause the test to terminate.\n"
#if !defined(MUNIT_NO_FORK)
       " --no-fork Do not execute tests in a child process.  If this option is supplied\n"
       "           and a test crashes (including by failing an assertion), no further\n"
       "           tests will be performed.\n"
#endif
       " --fatal-failures\n"
       "           Stop executing tests as soon as a failure is found.\n"
       " --show-stderr\n"
       "           Show data written to stderr by the tests, even if the test succeeds.\n"
       " --color auto|always|never\n"
       "           Colorize (or don't) the output.\n"
       /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
       " --help    Print this help message and exit.\n");
#if defined(MUNIT_NL_LANGINFO)
  setlocale(LC_ALL, "");
  fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ?
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; bool first; char** val; const MunitSuite* child_suite; for (test = suite->tests ; test != NULL && test->name != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = true; for (val = params->values ; *val != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = false; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return false; #endif } int munit_suite_main_custom(const MunitSuite* suite, void* user_data, int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], const MunitArgument arguments[]) { int result = EXIT_FAILURE; MunitTestRunner runner; size_t parameters_size = 0; size_t tests_size = 0; int arg; char* envptr; unsigned long ts; char* endptr; unsigned long long iterations; MunitLogLevel level; const MunitArgument* argument; const char** runner_tests; unsigned int tests_run; unsigned int tests_total; runner.prefix = NULL; runner.suite = NULL; runner.tests = NULL; runner.seed = 0; runner.iterations = 0; runner.parameters = NULL; runner.single_parameter_mode = false; runner.user_data = NULL; runner.report.successful = 0; runner.report.skipped = 0; runner.report.failed = 0; runner.report.errored = 0; #if defined(MUNIT_ENABLE_TIMING) runner.report.cpu_clock = 0; runner.report.wall_clock = 0; #endif runner.colorize = false; #if !defined(_WIN32) runner.fork = true; #else runner.fork = false; #endif runner.show_stderr = false; runner.fatal_failures = false; runner.suite = suite; runner.user_data = user_data; runner.seed = munit_rand_generate_seed(); runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); for (arg = 1 ; arg < argc ; arg++) { if (strncmp("--", argv[arg], 2) == 0) { if (strcmp("seed", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } envptr = argv[arg + 1]; ts = strtoul(argv[arg + 1], &envptr, 0); if (*envptr != '\0' || ts > (~((munit_uint32_t) 
0U))) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.seed = (munit_uint32_t) ts; arg++; } else if (strcmp("iterations", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } endptr = argv[arg + 1]; iterations = strtoul(argv[arg + 1], &endptr, 0); if (*endptr != '\0' || iterations > UINT_MAX) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } runner.iterations = (unsigned int) iterations; arg++; } else if (strcmp("param", argv[arg] + 2) == 0) { if (arg + 2 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]); goto cleanup; } runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2)); if (runner.parameters == NULL) { munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory"); goto cleanup; } runner.parameters[parameters_size].name = (char*) argv[arg + 1]; runner.parameters[parameters_size].value = (char*) argv[arg + 2]; parameters_size++; runner.parameters[parameters_size].name = NULL; runner.parameters[parameters_size].value = NULL; arg += 2; } else if (strcmp("color", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "always") == 0) runner.colorize = true; else if (strcmp(argv[arg + 1], "never") == 0) runner.colorize = false; else if (strcmp(argv[arg + 1], "auto") == 0) runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE); else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } arg++; } else if (strcmp("help", argv[arg] + 2) == 0) { munit_print_help(argc, argv, user_data, arguments); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("single", argv[arg] + 2) == 0) { runner.single_parameter_mode = true; } else if (strcmp("show-stderr", argv[arg] + 2) == 0) { runner.show_stderr = true; #if !defined(_WIN32) } else if (strcmp("no-fork", argv[arg] + 2) == 0) { runner.fork = false; #endif } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) { runner.fatal_failures = true; } else if (strcmp("log-visible", argv[arg] + 2) == 0 || strcmp("log-fatal", argv[arg] + 2) == 0) { if (arg + 1 >= argc) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]); goto cleanup; } if (strcmp(argv[arg + 1], "debug") == 0) level = MUNIT_LOG_DEBUG; else if (strcmp(argv[arg + 1], "info") == 0) level = MUNIT_LOG_INFO; else if (strcmp(argv[arg + 1], "warning") == 0) level = MUNIT_LOG_WARNING; else if (strcmp(argv[arg + 1], "error") == 0) level = MUNIT_LOG_ERROR; else { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]); goto cleanup; } if (strcmp("log-visible", argv[arg] + 2) == 0) munit_log_level_visible = level; else munit_log_level_fatal = level; arg++; } else if (strcmp("list", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, false, NULL); result = EXIT_SUCCESS; goto cleanup; } else if (strcmp("list-params", argv[arg] + 2) == 0) { munit_suite_list_tests(suite, true, NULL); result = EXIT_SUCCESS; goto cleanup; } else { argument = munit_arguments_find(arguments, argv[arg] + 2); if (argument == NULL) { munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", 
                              argv[arg]);
          goto cleanup;
        }
        if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
          goto cleanup;
      }
    } else {
      runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
      if (runner_tests == NULL) {
        munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
        goto cleanup;
      }
      runner.tests = runner_tests;
      runner.tests[tests_size++] = argv[arg];
      runner.tests[tests_size] = NULL;
    }
  }

  fflush(stderr);
  fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed);

  munit_test_runner_run(&runner);

  tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
  tests_total = tests_run + runner.report.skipped;
  if (tests_run == 0) {
    fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
  } else {
    fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) tests skipped.\n",
            runner.report.successful, tests_run,
            (((double) runner.report.successful) / ((double) tests_run)) * 100.0,
            runner.report.skipped,
            (((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
  }

  if (runner.report.failed == 0 && runner.report.errored == 0) {
    result = EXIT_SUCCESS;
  }

 cleanup:
  free(runner.parameters);
  free((void*) runner.tests);

  return result;
}

int
munit_suite_main(const MunitSuite* suite, void* user_data,
                 int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
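/* ---------------------------------------------------------------------------
 * Illustrative usage sketch (added; not part of the library).  A minimal
 * consumer of the runner implemented above: one test, one suite, and a main()
 * that hands argc/argv to munit_suite_main() so --seed, --iterations, --param
 * and friends behave as documented in munit_print_help().  The test/suite
 * names and the MUNIT_USAGE_EXAMPLE guard are hypothetical; the types and
 * calls (MunitTest, MunitSuite, munit_assert_int, munit_suite_main) are the
 * public munit API.
 * ------------------------------------------------------------------------- */
#if defined(MUNIT_USAGE_EXAMPLE)
static MunitResult
example_add_test(const MunitParameter params[], void* user_data) {
  (void) params;
  (void) user_data;
  munit_assert_int(2 + 2, ==, 4); /* on failure, aborts this iteration */
  return MUNIT_OK;
}

static MunitTest example_tests[] = {
  /* name, test function, setup, tear_down, options, parameters */
  { (char*) "/example/add", example_add_test, NULL, NULL,
    MUNIT_TEST_OPTION_NONE, NULL },
  { NULL, NULL, NULL, NULL, MUNIT_TEST_OPTION_NONE, NULL }
};

static const MunitSuite example_suite = {
  (char*) "", example_tests, NULL, 1, MUNIT_SUITE_OPTION_NONE
};

int main(int argc, char* argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main(&example_suite, NULL, argc, argv);
}
#endif /* MUNIT_USAGE_EXAMPLE */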
wgs64.c
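/* (Added note) Offloading smoke test: runs an empty `target` region, then a
 * `target teams distribute` loop and a `target teams distribute parallel for`
 * loop, each with thread_limit(64).  It only verifies that the constructs
 * compile and run to completion -- `fail` is never set, so "Succeeded" is
 * printed unconditionally. */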
#include <stdio.h>
#include <omp.h>

int n = 64;

int main(void) {
  int fail = 0;
  int a = -1;

#if 1
#pragma omp target
  {
    // nothing
  }
#endif

#pragma omp target teams distribute thread_limit(64)
  for (int k = 0; k < n; k++) {
    // nothing
  }

#pragma omp target teams distribute parallel for thread_limit(64)
  for (int k = 0; k < n; k++) {
    // nothing
  }

  printf("Succeeded\n");
  return fail;
}
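/* Illustrative sketch (added; not part of the original test).  The test above
 * only proves the constructs execute; to actually observe thread_limit(64)
 * one could max-reduce omp_get_num_threads() across the region and check it
 * never exceeds the limit.  The WGS64_VERIFY_THREAD_LIMIT guard is
 * hypothetical, so this does not change the normal build. */
#if defined(WGS64_VERIFY_THREAD_LIMIT)
static int check_thread_limit(void) {
  int max_seen = 0;
#pragma omp target teams distribute parallel for thread_limit(64) \
        map(tofrom: max_seen) reduction(max: max_seen)
  for (int k = 0; k < 64; k++) {
    /* omp_get_num_threads() reports the size of the team executing this
     * iteration, which thread_limit(64) caps at 64 */
    int t = omp_get_num_threads();
    if (t > max_seen)
      max_seen = t;
  }
  return max_seen > 64; /* nonzero means the limit was not honored */
}
#endif /* WGS64_VERIFY_THREAD_LIMIT */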
9600.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(1) { /* E := A*B */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(dynamic, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
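/* Note on the kernel above (added): correctness hinges on the implicit
 * barrier at the end of each worksharing loop.  The barrier after the second
 * `omp for` guarantees both E = A*B and F = C*D are complete before any
 * thread starts on G = E*F; a `nowait` clause there would introduce a data
 * race.
 *
 * Illustrative sketch (added): a hypothetical checksum helper one could use
 * to compare the live-out array across runs, schedules, or thread counts.
 * The POLYBENCH_3MM_CHECKSUM_EXAMPLE guard is hypothetical; DATA_TYPE and
 * POLYBENCH_2D come from 3mm.h as above. */
#if defined(POLYBENCH_3MM_CHECKSUM_EXAMPLE)
static double checksum_2d(int ni, int nl,
                          DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) {
  double s = 0.0;
  int i, j;
  /* fold every live-out element into a single double */
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++)
      s += (double) G[i][j];
  return s;
}
#endif /* POLYBENCH_3MM_CHECKSUM_EXAMPLE */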
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t 
DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. */ template<int ndim> MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret += coord[i] * stride[i]; } return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? 
cumprod : 0; cumprod *= shape[i]; } return stride; } /* Increment coordinates */ template<int ndim> MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) { ++(*coord)[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; } return (*coord)[0] < shape[0]; } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx, const Shape<ndim>& stride) { ++(*coord)[ndim-1]; *idx += stride[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx = *idx + stride[i-1] - shape[i] * stride[i]; } } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx1, const Shape<ndim>& stride1, index_t* idx2, const Shape<ndim>& stride2) { ++(*coord)[ndim-1]; *idx1 += stride1[ndim-1]; *idx2 += stride2[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i]; *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i]; } } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! 
\brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! \brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } /*! \brief input is a tensor and the output is a boolean tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and two scalar value with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is a tensor and a scalar value with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! 
* \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
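/*
 * Usage sketch (illustrative; `plus_one`, `s`, `out_ptr`, and `in_ptr` are
 * hypothetical names, not part of this header): a Map-style functor as
 * consumed by Kernel<OP, cpu>::Launch above. The launcher invokes
 * OP::Map(i, args...) for each i in [0, N), in parallel via OpenMP when the
 * recommended thread count allows, and KERNEL_ASSIGN honors the OpReqType
 * (write vs. accumulate) at each element:
 *
 *   struct plus_one {
 *     template<typename DType>
 *     MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
 *       KERNEL_ASSIGN(out[i], kWriteTo, in[i] + DType(1));
 *     }
 *   };
 *
 *   // Kernel<plus_one, cpu>::Launch(s, N, out_ptr, in_ptr);
 */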
pgas_addr.h
#pragma once #include <utility> #include <cassert> #include "adabs/adabs.h" #include "adabs/allocator.h" #include "adabs/tools/ptr_divider.h" #include "adabs/tools/alignment.h" #include "adabs/impl/pgas_addr.h" namespace adabs { template <typename T> struct allocator; namespace pgas { inline void pgas_addr_remote_get (gasnet_token_t token, gasnet_handlerarg_t arg0, // data pointer gasnet_handlerarg_t arg1, // data pointer gasnet_handlerarg_t arg2, // batch_mem size gasnet_handlerarg_t arg3, // return data pointer gasnet_handlerarg_t arg4, // return data pointer gasnet_handlerarg_t arg5 // flag diff for remote pointer ); inline void pgas_addr_remote_set (gasnet_token_t token, void *buf, size_t nbytes, gasnet_handlerarg_t arg0, // data pointer gasnet_handlerarg_t arg1 // data pointer ); inline void pgas_addr_set_uninit(gasnet_token_t token, gasnet_handlerarg_t arg0, // flag pointer gasnet_handlerarg_t arg1, // flag pointer gasnet_handlerarg_t arg2, // stride between flags gasnet_handlerarg_t arg3, // nb of flags gasnet_handlerarg_t arg4, // done marker pointer gasnet_handlerarg_t arg5 // done marker pointer ); inline void pgas_addr_check_get_all(gasnet_token_t token, gasnet_handlerarg_t arg0, // flag pointer gasnet_handlerarg_t arg1, // flag pointer gasnet_handlerarg_t arg2, // stride between flags gasnet_handlerarg_t arg3, // nb of flags gasnet_handlerarg_t arg4, // done marker pointer gasnet_handlerarg_t arg5 // done marker pointer ); // TODO move in different file (incl. implementation) inline void done_marker(gasnet_token_t token, gasnet_handlerarg_t arg0, // done marker pointer gasnet_handlerarg_t arg1 // done marker pointer ); } /** * A address in our pgas world */ template <typename T> class pgas_addr { /******************* TYPEDEFS ******************/ public: typedef T value_type; enum {EMPTY, WRITTING, REQUESTED, FULL}; /******************* VARIABLES *****************/ private: int _orig_node; int _batch_size; void* _orig_ptr; // just the plain start pointer mutable T* _cache; /**************** CON/DESTRUCTORS ***************/ public: pgas_addr (void* ptr, const int batch_size) : _orig_ptr(ptr), _orig_node(adabs::me), _batch_size(batch_size), _cache(0) { if (is_local()) { set_cache(); } } pgas_addr (void* ptr, const int batch_size, const int orig_node) : _orig_ptr(ptr), _orig_node(orig_node), _batch_size(batch_size), _cache(0) { if (is_local()) set_cache(); } pgas_addr (const pgas_addr<T> &copy) : _orig_ptr(copy._orig_ptr), _orig_node(copy._orig_node), _batch_size(copy._batch_size), _cache(0) { if (is_local()) set_cache(); } ~pgas_addr() { clear_cache(); } /***************** FUNCTIONS *********************/ public: T* get_data() const { using namespace adabs::tools; // must be called before is_available is called set_cache(); if (!is_local()) { const bool r = request(); if (r) { int a = tools::alignment<T>::val(); if (a<sizeof(int)) a = sizeof(int); GASNET_CALL(gasnet_AMRequestShort6(_orig_node, adabs::impl::PGAS_ADDR_GET, get_low(_orig_ptr), get_high(_orig_ptr), _batch_size * sizeof(T), get_low(_cache), get_high(_cache), a ) ) } } //std::cout << "waiting on flag " << (void*)get_flag() << " - local " << is_local() << std::endl; while (!is_available()) { } //std::cout << "waiting on flag " << (void*)get_flag() << " - local " << is_local() << " - done!" 
<< std::endl; return _cache; } T* get_data_unitialized() { set_cache(); const bool w = writing(); assert(w); return _cache; } void set_data(T const * const data) { assert (data == _cache); using namespace adabs::tools; __sync_synchronize(); const bool a = available(); assert (a); //#pragma omp critical //if (is_local()) std::cout << "local set on flag " << (void*)get_flag() << " - " << *(int*)get_flag() << std::endl; if (!is_local()) { GASNET_CALL(gasnet_AMRequestLong2(_orig_node, adabs::impl::PGAS_ADDR_SET, _cache, sizeof(T)*_batch_size, (void*)(_orig_ptr), get_low(get_orig_flag()), get_high(get_orig_flag()) ) ) } } bool is_local() const { return (_orig_node == adabs::me); } pgas_addr<T>& operator+= (const int rhs) { int a = tools::alignment<T>::val(); if (a<sizeof(int)) a = sizeof(int); _orig_ptr = (void*)( (char*)_orig_ptr + (a + sizeof(T)*_batch_size)*rhs ); clear_cache(); set_cache(); return *this; } pgas_addr<T> operator+(const int rhs) const { int a = tools::alignment<T>::val(); if (a<sizeof(int)) a = sizeof(int); char* add = (char*)_orig_ptr; add += (a + sizeof(T)*_batch_size)*rhs; return pgas_addr<T>(add, _batch_size, _orig_node); } pgas_addr<T>& operator-= (const int rhs) { int a = tools::alignment<T>::val(); if (a<sizeof(int)) a = sizeof(int); _orig_ptr = (void*)( (char*)_orig_ptr - (a + sizeof(T)*_batch_size)*rhs ); clear_cache(); set_cache(); return *this; } pgas_addr<T> operator-(const int rhs) const { int a = tools::alignment<T>::val(); if (a<sizeof(int)) a = sizeof(int); char* add = (char*)_orig_ptr; add -= (a + sizeof(T)*_batch_size)*rhs; return pgas_addr<T>(add, _batch_size, _orig_node); } /** * This function call is unsafe, only use with care * We'll have to check on which occasions we use it and than * offer better solutions for these. 
*/ void* get_raw_pointer() const { return _orig_ptr; } int get_node() const { return _orig_node; } int get_batch_size() const { return _batch_size; } void* get_orig_flag() const { //std::cout << "orig_ptr = " << _orig_ptr << std::endl; char* temp = (char*) _orig_ptr + sizeof(T)*_batch_size; return (void*)(temp); } int* get_flag() const { assert(_cache!=0); int a = tools::alignment<T>::val(); if (a<sizeof(int)) a = sizeof(int); return (int*)((char*)_cache + sizeof(T)*_batch_size); } private: void clear_cache() { if (!is_local()) { #pragma omp critical (pgas_cache) if (_cache!=0) { // not totally clean, allocated with adabs::allocator // but currently fine free(_cache); _cache = 0; } } } void set_cache() const { if (is_local()) { _cache = (T*)((char*)(_orig_ptr)); } else { #pragma omp critical (pgas_cache) { if (_cache == 0) _cache = allocator<T>::allocate(_batch_size, _batch_size).get_data_ptr(); } } } T* get_data_ptr() const { return _cache; } // check if flag is set to 1 bool is_writing() const { volatile int *reader = get_flag(); return (*reader == WRITTING); } // check if flag is set to 2 bool is_requested() const { volatile int *reader = get_flag(); return (*reader == REQUESTED); } // check if flag is set to 3 bool is_available() const { volatile int *reader = get_flag(); return (*reader == FULL); } // check and set flag to 1 bool writing() const { volatile int *ptr = get_flag(); return __sync_bool_compare_and_swap(ptr, EMPTY, WRITTING); } // check and set flag to 2 bool request() const { volatile int *ptr = get_flag(); return __sync_bool_compare_and_swap(ptr, EMPTY, REQUESTED); } // check and set flag to 3 bool available() const { volatile int *ptr = get_flag(); //#pragma omp critical //std::cout << me << ": avail 1 flag " << get_flag() << " - " << *ptr << std::endl; int val = __sync_lock_test_and_set(ptr, FULL); return (val == WRITTING || val == REQUESTED); } /****************** FRIEND CLASS **********************/ friend class allocator<T>; }; namespace pgas { inline void pgas_addr_set_uninit(gasnet_token_t token, gasnet_handlerarg_t arg0, // flag pointer gasnet_handlerarg_t arg1, // flag pointer gasnet_handlerarg_t arg2, // stride between flags gasnet_handlerarg_t arg3, // nb of flags gasnet_handlerarg_t arg4, // done marker pointer gasnet_handlerarg_t arg5 // done marker pointer ) { using namespace adabs::tools; int *flag = get_ptr<int>(arg0, arg1); for (int i=0; i<arg3; ++i) { int writting = __sync_val_compare_and_swap(flag, adabs::pgas_addr<void>::EMPTY, adabs::pgas_addr<void>::WRITTING); assert(writting==adabs::pgas_addr<void>::EMPTY); flag = (int*)((char*)flag + arg2); } int *return_marker = get_ptr<int>(arg4, arg5); if (return_marker != 0) { GASNET_CALL(gasnet_AMReplyShort2(token, adabs::impl::SET_RETURN_MARKER, arg4, arg5 ) ) } } /** * Pthread argument class */ struct pgas_addr_check_get_all_thread_arg { volatile int *flag; gasnet_handlerarg_t arg2; gasnet_handlerarg_t arg3; gasnet_handlerarg_t arg4; gasnet_handlerarg_t arg5; gasnet_node_t dest; pgas_addr_check_get_all_thread_arg(volatile int *_flag, gasnet_handlerarg_t _arg2, gasnet_handlerarg_t _arg3, gasnet_handlerarg_t _arg4, gasnet_handlerarg_t _arg5, gasnet_node_t _dest ) : flag(_flag), arg2(_arg2), arg3(_arg3), arg4(_arg4), arg5(_arg5), dest(_dest) {} }; inline void* pgas_addr_check_get_all_thread(void *threadarg) { using namespace adabs::tools; pgas_addr_check_get_all_thread_arg* arg = (pgas_addr_check_get_all_thread_arg*)threadarg; for (int i=0; i<arg->arg3; ++i) { // wait until flag is set volatile int* reader = 
arg->flag; while (*reader != adabs::pgas_addr<void>::FULL) {} arg->flag = (volatile int*)((char*)arg->flag + arg->arg2); } int *return_marker = get_ptr<int>(arg->arg4, arg->arg5); if (return_marker != 0) { GASNET_CALL(gasnet_AMRequestShort2(arg->dest, adabs::impl::SET_RETURN_MARKER, arg->arg4, arg->arg5 ) ) } delete arg; pthread_exit(0); } inline void pgas_addr_check_get_all(gasnet_token_t token, gasnet_handlerarg_t arg0, // flag pointer gasnet_handlerarg_t arg1, // flag pointer gasnet_handlerarg_t arg2, // stride between flags gasnet_handlerarg_t arg3, // nb of flags gasnet_handlerarg_t arg4, // done marker pointer gasnet_handlerarg_t arg5 // done marker pointer ) { using namespace adabs::tools; volatile int *flag = get_ptr<volatile int>(arg0, arg1); gasnet_node_t src; GASNET_CALL( gasnet_AMGetMsgSource(token, &src) ) pgas_addr_check_get_all_thread_arg *para = new pgas_addr_check_get_all_thread_arg(flag, arg2, arg3, arg4, arg5, src); pthread_t thread_id; pthread_attr_t attrb; pthread_attr_init(&attrb); pthread_attr_setdetachstate(&attrb, PTHREAD_CREATE_DETACHED); pthread_create(&thread_id, &attrb, pgas_addr_check_get_all_thread, (void*) para); } inline void done_marker(gasnet_token_t token, gasnet_handlerarg_t arg0, // done marker pointer gasnet_handlerarg_t arg1 // done marker pointer ) { using namespace adabs::tools; int *return_marker = get_ptr<int>(arg0, arg1); int val = __sync_lock_test_and_set(return_marker, 1); assert (val == 0); } inline void pgas_addr_remote_set (gasnet_token_t token, void *buf, size_t nbytes, gasnet_handlerarg_t arg0, // data pointer gasnet_handlerarg_t arg1 // data pointer ) { using namespace adabs::tools; int *flag = get_ptr<int>(arg0, arg1); //#pragma omp critical //std::cout << gasnet_mynode() << " remote set receviced flag: " << flag << " data: " << buf << " - " << (void*)((char*)buf + nbytes) << std::endl; __sync_synchronize(); int val = __sync_lock_test_and_set(flag, adabs::pgas_addr<void>::FULL); assert(val != adabs::pgas_addr<void>::FULL); } inline void pgas_addr_remote_get (gasnet_token_t token, gasnet_handlerarg_t arg0, // data pointer gasnet_handlerarg_t arg1, // data pointer gasnet_handlerarg_t arg2, // batch_mem size gasnet_handlerarg_t arg3, // return data pointer gasnet_handlerarg_t arg4, // return data pointer gasnet_handlerarg_t arg5 // flag diff for remote pointer ) { using namespace adabs::tools; using namespace adabs::impl; void *local = get_ptr<void>(arg0, arg1); void *remote = get_ptr<void>(arg3, arg4); gasnet_node_t src; GASNET_CALL( gasnet_AMGetMsgSource(token, &src) ) #pragma omp critical (global_requests) global_requests.insert(global_requests.begin(), remote_get_thread_arg(local, arg2, remote, src, arg5)); } } }
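/*
 * Flag-protocol sketch (illustrative and standalone; it mirrors the
 * transitions used by pgas_addr<T> above rather than reusing the class):
 * each batch carries an int flag after its data. A local writer CASes
 * EMPTY -> WRITTING, a remote reader CASes EMPTY -> REQUESTED, and
 * completion test-and-sets FULL, so exactly one party wins each transition.
 *
 *   enum { EMPTY, WRITTING, REQUESTED, FULL };
 *   static int flag = EMPTY;
 *
 *   // producer side
 *   if (__sync_bool_compare_and_swap(&flag, EMPTY, WRITTING)) {
 *     // ... fill the batch ...
 *     __sync_synchronize();                   // publish data before the flag
 *     __sync_lock_test_and_set(&flag, FULL);  // mark the batch available
 *   }
 *
 *   // consumer side
 *   while (*(volatile int *)&flag != FULL) { }  // spin until published
 */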
GB_binop__gt_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_bool) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__gt_bool) // A.*B function (eWiseMult): GB (_AemultB_03__gt_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_bool) // A*D function (colscale): GB (_AxD__gt_bool) // D*A function (rowscale): GB (_DxB__gt_bool) // C+=B function (dense accum): GB (_Cdense_accumB__gt_bool) // C+=b function (dense accum): GB (_Cdense_accumb__gt_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_bool) // C=scalar+B GB (_bind1st__gt_bool) // C=scalar+B' GB (_bind1st_tran__gt_bool) // C=A+scalar GB (_bind2nd__gt_bool) // C=A'+scalar GB (_bind2nd_tran__gt_bool) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ bool bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_BOOL || GxB_NO_GT_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__gt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_bool) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_bool) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__gt_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__gt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
*Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
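/*
 * Expansion sketch (illustrative; dense arrays Ax, Bx, Cx of length n are
 * hypothetical here): for this operator the templates above reduce to a
 * plain elementwise loop, with GB_GETA/GB_GETB/GB_BINOP expanding to:
 *
 *   for (int64_t p = 0 ; p < n ; p++)
 *   {
 *       bool aij = Ax [p] ;       // GB_GETA
 *       bool bij = Bx [p] ;       // GB_GETB
 *       Cx [p] = (aij > bij) ;    // GB_BINOP: cij = (aij > bij)
 *   }
 */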
pairwise_align.c
/*
   This file is part of SSCA1.

   Copyright (C) 2008-2015, UT-Battelle, LLC.

   This product includes software produced by UT-Battelle, LLC under Contract No.
   DE-AC05-00OR22725 with the Department of Energy.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the New BSD 3-clause software license (LICENSE).

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE for more details.

   For more information please contact the SSCA1 developers at:
   bakermb@ornl.gov
*/

#define _BSD_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <sort.h>
#include <pairwise_align.h>
#include <string.h>
#include <util.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>

#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif

typedef struct {
  index_t *goodEnds[2];
  score_t *goodScores;
  int report;
  int size;
  score_t min_score;
} current_ends_t;

static index_t unsigned_abs_diff(index_t A, index_t B) {
  return (A > B) ? (A - B) : (B - A);
}

void print_back_20(good_match_t *A, seq_t *in_seq, index_t end_index){
  index_t start = end_index - 20;
  char main_acid_chain[21];
  char main_codon_chain[61];
  seq_t this_seq;
  this_seq.sequence = in_seq->sequence+start;
  this_seq.length = 20;
  memset(main_acid_chain, '\0', 21);
  memset(main_codon_chain, '\0', 61);  /* zero the whole codon buffer: 3 chars per acid plus the terminating NUL */
  assemble_acid_chain(A, main_acid_chain, &this_seq, 20);
  assemble_codon_chain(A, main_codon_chain, &this_seq, 20);
  printf("%7ld %s %s %7ld\n", start, main_acid_chain, main_codon_chain, end_index);
}

static void considerAdding(score_t score, int minSeparation, index_t main_index, index_t match_index, int maxReports, current_ends_t *score_ends) {
  // first scan the list to see if there is already a match that is closer
  for(int idx=0; idx < score_ends->report; idx++){
    if(unsigned_abs_diff(score_ends->goodEnds[0][idx], main_index) < minSeparation ||
       unsigned_abs_diff(score_ends->goodEnds[1][idx], match_index) < minSeparation) {
      if(score_ends->goodScores[idx] < score) {
        score_ends->goodEnds[0][idx] = main_index;
        score_ends->goodEnds[1][idx] = match_index;
        score_ends->goodScores[idx] = score;
        return;
      } else {
        return;
      }
    }
  }
  // if the list is full, compact it down to the best maxReports entries
  if(score_ends->report == score_ends->size) {
    index_t worst_keeper, new_best_index=0;
    index_t *index_array = (index_t *)malloc(score_ends->size*sizeof(index_t));
    score_t *sorted_array = (score_t *)malloc(score_ends->size*sizeof(score_t));
    index_t *best_index = (index_t *)malloc(score_ends->size*sizeof(index_t));
    memcpy(sorted_array, score_ends->goodScores, sizeof(score_t)*score_ends->report);
    index_sort(sorted_array, index_array, score_ends->report);
    worst_keeper = score_ends->size - maxReports;
    /* the new cutoff is the lowest score we keep; index_array (filled by
       index_sort) maps sorted positions back to original entries */
    score_ends->min_score = score_ends->goodScores[index_array[worst_keeper]];
    for(int index_for_index=worst_keeper; index_for_index < score_ends->size; index_for_index++) {
      best_index[new_best_index] = index_array[index_for_index];
      new_best_index++;
    }
    sort(best_index, new_best_index);
    for(int idx=0; idx < new_best_index; idx++) {
      score_ends->goodScores[idx] = score_ends->goodScores[best_index[idx]];
      score_ends->goodEnds[0][idx] = score_ends->goodEnds[0][best_index[idx]];
      score_ends->goodEnds[1][idx] = score_ends->goodEnds[1][best_index[idx]];
    }
    score_ends->report = maxReports;
    free(index_array);
free(sorted_array);
    free(best_index);
  }
  score_ends->goodEnds[0][score_ends->report] = main_index;
  score_ends->goodEnds[1][score_ends->report] = match_index;
  score_ends->goodScores[score_ends->report] = score;
  score_ends->report++;
}

/* release_good_match:
 * Free the good_match_t structure generated by Kernel 1 and Kernel 2
 * Note: You can still release a structure that has not been through Kernel 2;
 * there are no ill side effects.
 * Input:
 *   good_match_t *doomed - the structure to be freed
 * Output:
 *   None
 */

void release_good_match(good_match_t *doomed) {
  if(doomed==NULL) return;
  free(doomed->goodScores);
  free(doomed->goodEnds[1]);
  free(doomed->goodEnds[0]);
  free(doomed->bestStarts[0]);
  free(doomed->bestStarts[1]);
  free(doomed->bestEnds[0]);
  free(doomed->bestEnds[1]);
  free(doomed->bestScores);
  for(int idx=0; idx<doomed->bestLength; idx++) {
    free_local_seq(doomed->bestSeqs[idx].main);
    free_local_seq(doomed->bestSeqs[idx].match);
  }
  free(doomed->bestSeqs);
  free(doomed);
}

//typedef score_t score_matrix_t;
typedef struct {
  score_t *scores;
  index_t length;
  index_t local_length;
} score_matrix_t;

typedef score_matrix_t gap_matrix_t;

static score_matrix_t *alloc_score_matrix(index_t matrix_length){
  score_matrix_t *new_alloc = (score_matrix_t *)malloc(sizeof(score_matrix_t));
  assert(new_alloc != NULL);
  new_alloc->length = matrix_length;
  new_alloc->local_length = (matrix_length / num_nodes);
  malloc_all(sizeof(score_t)*3*new_alloc->local_length, (void **)&new_alloc->scores);
  assert(new_alloc->scores != NULL);
  touch_memory(new_alloc->scores, sizeof(score_t)*3*new_alloc->local_length);
  return new_alloc;
}

static gap_matrix_t *alloc_gap_matrix(index_t matrix_length){
  gap_matrix_t *new_alloc = (gap_matrix_t *)malloc(sizeof(gap_matrix_t));
  assert(new_alloc != NULL);
  new_alloc->length = matrix_length;
  new_alloc->local_length = (matrix_length / num_nodes);
  malloc_all(sizeof(score_t)*2*new_alloc->local_length, (void **)&new_alloc->scores);
  assert(new_alloc->scores != NULL);
  touch_memory(new_alloc->scores, sizeof(score_t)*2*new_alloc->local_length);
  return new_alloc;
}

static void free_score_matrix(score_matrix_t *doomed){
  FREE_ALL(doomed->scores);
  free(doomed);
}

static void free_gap_matrix(gap_matrix_t *doomed){
  FREE_ALL(doomed->scores);
  free(doomed);
}

#define index2d(x,y,stride) ((y) + ((x) * (stride)))

static void fetch_score(score_matrix_t *A, index_t m, index_t n, score_t *in){
  int target_ep = n / A->local_length;
  int local_index = n % A->local_length;
  SHORT_GET(in, &(A->scores[index2d(m%3,local_index,A->local_length)]), 1, target_ep);
}

static void fetch_gap(gap_matrix_t *A, index_t m, index_t n, score_t *in){
  int target_ep = n / A->local_length;
  int local_index = n % A->local_length;
  SHORT_GET(in, &(A->scores[index2d(m%2,local_index,A->local_length)]), 1, target_ep);
}

/* maybe useful for going to further extremes.
Commented out because GCC complains of unused functions */ #if 0 static void fetch_score_nb(score_matrix_t *A, index_t m, index_t n, score_t *in){ int target_ep = n / A->local_length; int local_index = n % A->local_length; SHORT_GET_NB((short*)in, &(A->scores[index2d(m%3,local_index,A->local_length)]), 1, target_ep); } static void fetch_gap_nb(gap_matrix_t *A, index_t m, index_t n, score_t *in){ int target_ep = n / A->local_length; int local_index = n %A->local_length; SHORT_GET_NB((short*)in, &(A->scores[index2d(m%2,local_index,A->local_length)]), 1, target_ep); } #endif static void assign_score(score_matrix_t *A, index_t m, index_t n, score_t new_value){ int target_ep = n / A->local_length; int local_index = n %A->local_length; SHORT_PUT(&(A->scores[index2d(m%3,local_index,A->local_length)]), &new_value, 1, target_ep); } static void assign_gap(score_matrix_t *A, index_t m, index_t n, score_t new_value){ int target_ep = n / A->local_length; int local_index = n % A->local_length; SHORT_PUT(&(A->scores[index2d(m%2,local_index,A->local_length)]), &new_value, 1, target_ep); } #ifdef USE_SHMEM long collect_pSync[_SHMEM_REDUCE_SYNC_SIZE]; int collect_pWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; #endif static void collect_best_results(current_ends_t **good_ends, int max_reports, int max_threads, good_match_t *answer){ index_t copied=0; int max_values=0; current_ends_t collected_ends, current_end; if(rank != 0) return; memset(answer->goodEnds[0], 0, sizeof(index_t)*max_reports); memset(answer->goodEnds[1], 0, sizeof(index_t)*max_reports); memset(answer->goodScores, 0, sizeof(score_t)*max_reports); collected_ends.goodScores = malloc(sizeof(score_t)*good_ends[0]->size*num_nodes); collected_ends.goodEnds[0] = malloc(sizeof(index_t)*good_ends[0]->size*num_nodes); collected_ends.goodEnds[1] = malloc(sizeof(index_t)*good_ends[0]->size*num_nodes); for(int idx=0; idx < num_nodes; idx++){ GETMEM(&current_end, good_ends[0], sizeof(current_ends_t), idx); SHORT_GET(&(collected_ends.goodScores[copied]), good_ends[0]->goodScores, current_end.report, idx); LONG_GET(&(collected_ends.goodEnds[0][copied]), good_ends[0]->goodEnds[0], current_end.report, idx); LONG_GET(&(collected_ends.goodEnds[1][copied]), good_ends[0]->goodEnds[1], current_end.report, idx); copied += current_end.report; } sort_ends_t *sorted_list = malloc(sizeof(sort_ends_t)*copied); for(int idx=0; idx < copied; idx++){ sorted_list[idx].score = collected_ends.goodScores[idx]; sorted_list[idx].main_end = collected_ends.goodEnds[0][idx]; sorted_list[idx].match_end = collected_ends.goodEnds[1][idx]; } ends_sort(sorted_list, copied); if(copied > max_reports){ max_values = max_reports; } else { max_values = copied; } for(int idx=0; idx < max_values; idx++){ answer->goodScores[idx] = sorted_list[idx].score; answer->goodEnds[0][idx] = sorted_list[idx].main_end; answer->goodEnds[1][idx] = sorted_list[idx].match_end; } free(sorted_list); answer->numReports = max_values; } /* pairwise_align * real meat of the program, this function finds codon similarities in seq_data using the matrix sim_matrix * Input: * seq_data_t *seq_data - Sequence data generated by genScalData() * sim_matrix_t *sim_matrix - Codon similarity matrix generated by genSimMatrix() * int minScore - Minimum end point score, from the init_parameters() function * int maxReports - Maximum number of reports to keep, from the init_parameters() function * int minSeparation - Minimum end point seperation in codons, from the init_parameters() function * * Output: * good_matrix_t * - a matrix of good 
matches * ->simMatrix - a pointer to the sim_matrix_t used * ->seqData - a pointer to the seq_data_t used * ->goodEnds - a [2][maxReports] matrix with main/match endpoints * ->goodScores - a [maxReports] good scores for upto maxReports endpoints * ->numReports - an integer, the number of reports represented */ good_match_t *pairwise_align(seq_data_t *seq_data, sim_matrix_t *sim_matrix, const int minScore, const int maxReports, const int minSeparation) { const int sortReports = maxReports * 10; const seq_t *main_seq = seq_data->main; const seq_t *match_seq = seq_data->match; const score_t gapExtend = sim_matrix->gapExtend; const score_t gapFirst = sim_matrix->gapStart + gapExtend; const index_t main_len = seq_data->main->length; codon_t current_main, current_match; codon_t next_main, next_match; const int max_threads = omp_get_max_threads(); current_ends_t **good_ends = (current_ends_t **)malloc(sizeof(current_ends_t *)*max_threads); #ifdef _OPENMP #pragma omp parallel for #endif for(int jdx=0; jdx < max_threads; jdx++) { int idx = omp_get_thread_num(); malloc_all(sizeof(current_ends_t), (void **)&good_ends[idx]); good_ends[idx]->size = sortReports; good_ends[idx]->report = 0; malloc_all(sizeof(score_t)*sortReports, (void **)&good_ends[idx]->goodScores); malloc_all(sizeof(index_t)*sortReports, (void **)&good_ends[idx]->goodEnds[0]); malloc_all(sizeof(index_t)*sortReports, (void **)&good_ends[idx]->goodEnds[1]); good_ends[idx]->min_score = minScore; } score_matrix_t *restrict score_matrix = alloc_score_matrix(seq_data->match->length); gap_matrix_t *restrict main_gap_matrix = alloc_gap_matrix(seq_data->match->length); gap_matrix_t *restrict match_gap_matrix = alloc_gap_matrix(seq_data->match->length); index_t score_start, score_end; score_t G, W, E, F, cmp_a, cmp_b, cmp_c, new_score, next_G, next_F, next_E; index_t m, n; good_match_t *answer; codon_t main_codon; codon_t match_codon; index_t local_main_start = seq_data->main->local_size * rank; index_t local_main_end = seq_data->main->local_size * (rank+1) - 1; /* if(rank == 0){ printf("Ready to debug on PID=%i\n", getpid()); int gogogo=0; while(gogogo==0){} } */ //First iteration, done by hand. Basically idx=0 in the big loop if(rank == 0){ fetch_from_seq(main_seq,0,&main_codon); fetch_from_seq(match_seq,0,&match_codon); W = sim_matrix->similarity[main_codon][match_codon]; assign_score(score_matrix,0,0,0 > W ? 0 : W); assign_gap(main_gap_matrix,0,0,-gapFirst + W); assign_gap(match_gap_matrix,0,0,-gapFirst + W); //idx=1 m=0,1 n =1,0 fetch_from_seq(main_seq,0,&main_codon); fetch_from_seq(match_seq,1, &match_codon); W = sim_matrix->similarity[main_codon][match_codon]; G = W; fetch_gap(main_gap_matrix,0,0,&E); cmp_a = 0 > E ? 0 : E; cmp_a = cmp_a > G ? cmp_a : G; assign_score(score_matrix,1,1,cmp_a); cmp_a = E - gapExtend; cmp_b = G - gapFirst; assign_gap(main_gap_matrix,1,0,cmp_a > cmp_b ? cmp_a : cmp_b); assign_gap(match_gap_matrix,1,0,-gapFirst > cmp_b ? -gapFirst : cmp_b); fetch_from_seq(main_seq,1,&main_codon); fetch_from_seq(match_seq,0,&match_codon); W = sim_matrix->similarity[main_codon][match_codon]; G = W; fetch_gap(match_gap_matrix,0,0, &F); cmp_a = 0 > F ? 0 : F; cmp_a = cmp_a > G ? cmp_a : G; assign_score(score_matrix,1,0,cmp_a); cmp_a = F - gapExtend; cmp_b = G - gapFirst; assign_gap(main_gap_matrix,1,1,-gapFirst > cmp_b ? -gapFirst : cmp_b); assign_gap(match_gap_matrix,1,1,cmp_a > cmp_b ? 
cmp_a : cmp_b); } for(index_t idx=2; idx < seq_data->match->length * 2 - 1; idx++) { BARRIER_ALL(); score_start = idx > (seq_data->match->length - 1) ? (idx-(seq_data->match->length-1)) : 0; score_end = idx < (seq_data->match->length-1) ? (idx) : (seq_data->match->length-1); if(idx < seq_data->match->length) { if(rank == 0){ m = 0; n = idx; fetch_from_seq(main_seq,m,&main_codon); fetch_from_seq(match_seq,n,&match_codon); W = sim_matrix->similarity[main_codon][match_codon]; G = W; fetch_gap(match_gap_matrix,idx-1,n-1,&F); cmp_a = F > 0 ? F : 0; cmp_a = cmp_a > G ? cmp_a : G; assign_score(score_matrix,idx,m,cmp_a); new_score= cmp_a; if((new_score > good_ends[omp_get_thread_num()]->min_score && W > 0 && new_score == G)){ if (m+1 == seq_data->main->length || n == 0) { considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } else { fetch_from_seq(main_seq, m+1, &next_main); fetch_from_seq(match_seq, n-1, &next_match); if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){ considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } } } cmp_a = F - gapExtend; cmp_b = G - gapFirst; assign_gap(match_gap_matrix,idx,n,cmp_a > cmp_b ? cmp_a : cmp_b); m = idx; n = 0; fetch_from_seq(main_seq,m,&main_codon); fetch_from_seq(match_seq,n,&match_codon); W = sim_matrix->similarity[main_codon][match_codon]; G = W; fetch_gap(main_gap_matrix, idx-1, m-1, &E); cmp_a = E > 0 ? E : 0; cmp_a = cmp_a > G ? cmp_a : G; assign_score(score_matrix,idx,m,cmp_a); new_score = cmp_a; if((new_score > good_ends[omp_get_thread_num()]->min_score && W > 0 && new_score == G)){ if (m+1 == seq_data->main->length || n == 0) { considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } else { fetch_from_seq(main_seq, m+1, &next_main); fetch_from_seq(match_seq, n-1, &next_match); if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){ considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } } } cmp_a = E - gapExtend; cmp_b = G - gapFirst; assign_gap(main_gap_matrix, idx, m, cmp_a > cmp_b ? cmp_a : cmp_b); } score_start++; score_end = score_end - 1; } index_t local_start, local_end; if(score_end < local_main_start && score_start > local_main_end){ local_start = 1; local_end = 0; } else { if(local_main_start > score_start){ local_start = local_main_start; } else { local_start = score_start; } if(score_end > local_main_end){ local_end = local_main_end; } else { local_end = score_end; } fetch_from_seq(main_seq,local_start,&next_main); fetch_from_seq(match_seq,idx - score_start,&next_match); fetch_score(score_matrix, (idx-2)%3, local_start-1, &next_G); fetch_gap(match_gap_matrix, idx-1, idx - (score_start+1), &next_F); fetch_gap(main_gap_matrix, idx-1, local_start-1, &next_E); } //As a note, this loop is the program execution time. If you're looking to optimize this benchmark, this is all that counts. 
for(index_t antidiagonal = local_start; antidiagonal <= local_end; antidiagonal++) { m = antidiagonal; n = idx - m; #ifdef USE_PREFETCH current_main = next_main; current_match = next_match; G = next_G; F = next_F; E = next_E; if (m < (seq_data->main->length-1)) fetch_from_seq_nb(main_seq, m+1, &next_main); if (n > 0) fetch_from_seq_nb(match_seq, n-1, &next_match); if (n > 1) fetch_gap(match_gap_matrix, idx-1, n-2, &next_F); fetch_gap(main_gap_matrix, idx-1, m, &next_E); fetch_score(score_matrix, (idx-2)%3, m, &next_G); #else fetch_from_seq(main_seq, m, &current_main); fetch_from_seq(match_seq, n, &current_match); fetch_gap(match_gap_matrix, idx-1, n-1, &F); fetch_gap(main_gap_matrix, idx-1, m-1, &E); fetch_score(score_matrix, (idx-2)%3, m-1, &G); #endif cmp_a = 0; cmp_a = cmp_a > E ? cmp_a : E; cmp_a = cmp_a > F ? cmp_a : F; W = sim_matrix->similarity[current_main][current_match]; G += W; new_score = cmp_a > G ? cmp_a : G; if((new_score > good_ends[omp_get_thread_num()]->min_score && W > 0 && new_score == G)){ #ifdef USE_PREFETCH if (m+1 == seq_data->main->length || n == 0) { considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } else { WAIT_NB(); if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){ considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } } #else if (m+1 == seq_data->main->length || n == 0) { considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } else { fetch_from_seq(main_seq, m+1, &next_main); fetch_from_seq(match_seq, n-1, &next_match); if((m == main_len - 1) || (n == 0) || sim_matrix->similarity[next_main][next_match] <= 0){ considerAdding(new_score, minSeparation, m, n, maxReports, good_ends[omp_get_thread_num()]); } } #endif } cmp_a = E - gapExtend; cmp_b = G - gapFirst; cmp_c = F - gapExtend; #ifdef USE_PREFETCH WAIT_NB(); #endif assign_score(score_matrix,idx,m,new_score); assign_gap(main_gap_matrix, idx, m, cmp_a > cmp_b ? cmp_a : cmp_b); assign_gap(match_gap_matrix, idx, n, cmp_c > cmp_b ? cmp_c : cmp_b); } } answer = (good_match_t*)malloc(sizeof(good_match_t)); answer->simMatrix = sim_matrix; answer->seqData = seq_data; answer->goodEnds[0] = (index_t*)malloc(sizeof(index_t)*maxReports); answer->goodEnds[1] = (index_t*)malloc(sizeof(index_t)*maxReports); answer->goodScores = (score_t*)malloc(sizeof(score_t)*maxReports); answer->bestEnds[0] = NULL; answer->bestStarts[0] = NULL; answer->bestEnds[1] = NULL; answer->bestStarts[1] = NULL; answer->bestSeqs = NULL; answer->bestScores = NULL; BARRIER_ALL(); collect_best_results(good_ends, maxReports, max_threads, answer); free_score_matrix(score_matrix); free_gap_matrix(main_gap_matrix); free_gap_matrix(match_gap_matrix); for(int idx=0; idx < max_threads; idx++) { FREE_ALL(good_ends[idx]->goodScores); FREE_ALL(good_ends[idx]->goodEnds[0]); FREE_ALL(good_ends[idx]->goodEnds[1]); FREE_ALL(good_ends[idx]); } free(good_ends); return answer; }
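/*
 * Recurrence sketch (illustrative): the anti-diagonal loop above implements
 * affine-gap Smith-Waterman. Writing S for the score matrix, E/F for the
 * main/match gap matrices, and W = similarity[main[m]][match[n]], each cell
 * on anti-diagonal idx = m + n is computed as:
 *
 *   G       = S[m-1][n-1] + W;
 *   S[m][n] = max(0, E[m-1][n], F[m][n-1], G);           // local floor at 0
 *   E[m][n] = max(E[m-1][n] - gapExtend, G - gapFirst);
 *   F[m][n] = max(F[m][n-1] - gapExtend, G - gapFirst);
 *
 * Only the last three score anti-diagonals and the last two gap
 * anti-diagonals are retained (the %3 and %2 row indexing), since S reads
 * from diagonal idx-2 and E/F read from diagonal idx-1.
 */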
ompfor4.c
/*
 * Dynamic schedule: iterations are handed out to threads one at a time,
 * at run time, as each thread becomes free.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Stubs so the example still compiles and runs without OpenMP. */
static int omp_get_thread_num(void)  { return 0; }
static int omp_get_num_threads(void) { return 1; }
#endif

int a[20];

/* Walk i down from lower toward upper (exclusive) in steps of stride. */
void foo(int lower, int upper, int stride)
{
    int i;
    #pragma omp for schedule(dynamic)
    for (i = lower; i > upper; i -= stride)
    {
        a[i] = i * 2;
        printf("Iteration %2d is carried out by thread %2d\n",
               i, omp_get_thread_num());
    }
}

int main(void)
{
    #pragma omp parallel
    {
        #pragma omp single
        printf("Using %d threads.\n", omp_get_num_threads());
        /* Was foo(0,20,3), which never entered the loop (0 > 20 is false)
           and would have indexed past a[19] if it had. */
        foo(19, 0, 3);
    }
    return 0;
}
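For contrast with the plain schedule(dynamic) above (chunk size 1), a chunked variant hands out iterations in blocks, trading some load balance for less scheduling traffic. A small self-contained sketch; without -fopenmp the pragma is simply ignored and the loop runs serially:

#include <stdio.h>

int main(void)
{
    int a[20];
    /* schedule(dynamic, 4): an idle thread grabs 4 iterations at a time
       from the shared queue instead of 1, so there are fewer (but
       coarser-grained) dispatches. */
    #pragma omp parallel for schedule(dynamic, 4)
    for (int i = 0; i < 20; i++)
        a[i] = i * 2;
    printf("a[19] = %d\n", a[19]);
    return 0;
}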
GB_unaryop__minv_uint64_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint64_fp64 // op(A') function: GB_tran__minv_uint64_fp64 // C type: uint64_t // A type: double // cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64) // unaryop: cij = GB_IMINV_UNSIGNED (aij, 64) #define GB_ATYPE \ double #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint64_fp64 ( uint64_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
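The file above is one instance of a generate-per-type-pair pattern: a single generic loop is stamped out for every (C type, A type, operator) triple, with the cast and the operator inlined through macros so each kernel is fully specialized. Below is a standalone analogue of that pattern for the minv/uint64/fp64 case. MY_CAST and MY_OP are simplified stand-ins, not GraphBLAS's real macros: GB_CAST_UNSIGNED also handles NaN and out-of-range doubles, and GB_IMINV_UNSIGNED's exact definition lives in GB.h, so the zero-input behavior here is an assumption.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for GB_CASTING and GB_OP in the generated kernel. */
#define MY_CAST(z, x)  uint64_t z = (uint64_t) (x) ;
#define MY_OP(z, x)    z = ((x) == 0 ? UINT64_MAX : 1 / (x)) ;  /* assumed: saturate 1/0 */

/* Cx [p] = op (cast (Ax [p])), same shape as GB_unop__minv_uint64_fp64. */
static void apply_unop(uint64_t *Cx, const double *Ax, int64_t anz)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        MY_CAST (x, Ax [p]) ;
        MY_OP (Cx [p], x) ;
    }
}

int main(void)
{
    double A[3] = { 0.0, 2.0, 4.0 };
    uint64_t C[3];
    apply_unop(C, A, 3);
    printf("%llu %llu %llu\n", (unsigned long long) C[0],
           (unsigned long long) C[1], (unsigned long long) C[2]);
    return 0;
}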
onyx_if.c
/* * Copyright (c) 2010 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "onyxc_int.h" #include "onyx_int.h" #include "systemdependent.h" #include "quantize.h" #include "alloccommon.h" #include "mcomp.h" #include "firstpass.h" #include "psnr.h" #include "vpx_scale/vpxscale.h" #include "extend.h" #include "ratectrl.h" #include "quant_common.h" #include "segmentation.h" #include "g_common.h" #include "vpx_scale/yv12extend.h" #include "postproc.h" #include "vpx_mem/vpx_mem.h" #include "swapyv12buffer.h" #include "threading.h" #include "vpx_ports/vpx_timer.h" #include "vpxerrors.h" #include "temporal_filter.h" #if ARCH_ARM #include "vpx_ports/arm.h" #endif #include <math.h> #include <stdio.h> #include <limits.h> #if CONFIG_RUNTIME_CPU_DETECT #define IF_RTCD(x) (x) #define RTCD(x) &cpi->common.rtcd.x #else #define IF_RTCD(x) NULL #define RTCD(x) NULL #endif extern void vp8cx_init_mv_bits_sadcost(); extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi); extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val); extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi); extern void vp8_init_loop_filter(VP8_COMMON *cm); extern void vp8_loop_filter_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, int filt_val); extern void vp8_loop_filter_frame_yonly(VP8_COMMON *cm, MACROBLOCKD *mbd, int filt_val, int sharpness_lvl); extern void vp8_dmachine_specific_config(VP8_COMP *cpi); extern void vp8_cmachine_specific_config(VP8_COMP *cpi); extern void vp8_calc_auto_iframe_target_size(VP8_COMP *cpi); extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag); extern void print_parms(VP8_CONFIG *ocf, char *filenam); extern unsigned int vp8_get_processor_freq(); extern void print_tree_update_probs(); extern void vp8cx_create_encoder_threads(VP8_COMP *cpi); extern void vp8cx_remove_encoder_threads(VP8_COMP *cpi); #if HAVE_ARMV7 extern void vp8_yv12_copy_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc); extern void vp8_yv12_copy_src_frame_func_neon(YV12_BUFFER_CONFIG *src_ybc, YV12_BUFFER_CONFIG *dst_ybc); #endif int vp8_estimate_entropy_savings(VP8_COMP *cpi); int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd); int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd); static void set_default_lf_deltas(VP8_COMP *cpi); extern const int vp8_gf_interval_table[101]; #if CONFIG_PSNR #include "math.h" extern double vp8_calc_ssim ( YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, int lumamask, double *weight ); extern double vp8_calc_ssimg ( YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v ); #endif #ifdef OUTPUT_YUV_SRC FILE *yuv_file; #endif #if 0 FILE *framepsnr; FILE *kf_list; FILE *keyfile; #endif #if 0 extern int skip_true_count; extern int skip_false_count; #endif #ifdef ENTROPY_STATS extern int intra_mode_stats[10][10][10]; #endif #ifdef SPEEDSTATS unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; unsigned int tot_pm = 0; unsigned int cnt_pm = 
0; unsigned int tot_ef = 0; unsigned int cnt_ef = 0; #endif #ifdef MODE_STATS extern unsigned __int64 Sectionbits[50]; extern int y_modes[5] ; extern int uv_modes[4] ; extern int b_modes[10] ; extern int inter_y_modes[10] ; extern int inter_uv_modes[4] ; extern unsigned int inter_b_modes[15]; #endif extern void (*vp8_short_fdct4x4)(short *input, short *output, int pitch); extern void (*vp8_short_fdct8x4)(short *input, short *output, int pitch); extern const int vp8_bits_per_mb[2][QINDEX_RANGE]; extern const int qrounding_factors[129]; extern const int qzbin_factors[129]; extern void vp8cx_init_quantizer(VP8_COMP *cpi); extern const int vp8cx_base_skip_false_prob[128]; // Tables relating active max Q to active min Q static const int kf_low_motion_minq[QINDEX_RANGE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 10,10, 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18, 19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26, 27,27,28,28,29,29,30,30,31,32,33,34,35,36,37,38, }; static const int kf_high_motion_minq[QINDEX_RANGE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10,10, 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18, 19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26, 27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34, 35,35,36,36,37,38,39,40,41,42,43,44,45,46,47,48, }; /*static const int kf_minq[QINDEX_RANGE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,10,11,11,12,12,13,13,14,14, 15,15,16,16,17,17,18,18,19,19,20,20,21,21,22,22, 23,23,24,24,25,25,26,26,27,27,28,28,29,29,30,30, 31,31,32,32,33,33,34,34,35,35,36,36,37,37,38,38 };*/ static const int gf_low_motion_minq[QINDEX_RANGE] = { 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2, 3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6, 7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10, 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18, 19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26, 27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34, 35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42, 43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58 }; static const int gf_mid_motion_minq[QINDEX_RANGE] = { 0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4, 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9, 9,10,10,10,10,11,11,11,12,12,12,12,13,13,13,14, 14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21, 22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29, 30,30,31,31,32,32,33,33,34,34,35,35,36,36,37,37, 38,39,39,40,40,41,41,42,42,43,43,44,45,46,47,48, 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64, }; static const int gf_high_motion_minq[QINDEX_RANGE] = { 0,0,0,0,1,1,1,1,1,2,2,2,3,3,3,4, 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9, 9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16, 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40, 41,41,42,42,43,44,45,46,47,48,49,50,51,52,53,54, 55,56,57,58,59,60,62,64,66,68,70,72,74,76,78,80, }; /*static const int gf_arf_minq[QINDEX_RANGE] = { 0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4, 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9, 9,10,10,10,11,11,11,12,12,12,13,13,13,14,14,14, 15,15,16,16,17,17,18,18,19,19,20,20,21,21,22,22, 23,23,24,24,25,25,26,26,27,27,28,28,29,29,30,30, 
31,31,32,32,33,33,34,34,35,35,36,36,37,37,38,39, 39,40,40,41,41,42,42,43,43,44,45,46,47,48,49,50, 51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66 };*/ static const int inter_minq[QINDEX_RANGE] = { 0,0,0,0,1,1,2,3,3,4,4,5,6,6,7,7, 8,8,9,9,10,11,11,12,12,13,13,14,14,15,15,16, 16,17,17,17,18,18,19,19,20,20,21,21,22,22,22,23, 23,24,24,24,25,25,26,27,28,28,29,30,31,32,33,34, 35,35,36,37,38,39,39,40,41,42,43,43,44,45,46,47, 47,48,49,49,51,52,53,54,54,55,56,56,57,57,58,58, 59,59,60,61,61,62,62,63,64,64,65,66,67,67,68,69, 69,70,71,71,72,73,74,75,76,76,77,78,79,80,81,81, }; void vp8_initialize() { static int init_done = 0; if (!init_done) { vp8_scale_machine_specific_config(); vp8_initialize_common(); //vp8_dmachine_specific_config(); vp8_tokenize_initialize(); vp8cx_init_mv_bits_sadcost(); init_done = 1; } } #ifdef PACKET_TESTING extern FILE *vpxlogc; #endif static void setup_features(VP8_COMP *cpi) { // Set up default state for MB feature flags cpi->mb.e_mbd.segmentation_enabled = 0; cpi->mb.e_mbd.update_mb_segmentation_map = 0; cpi->mb.e_mbd.update_mb_segmentation_data = 0; vpx_memset(cpi->mb.e_mbd.mb_segment_tree_probs, 255, sizeof(cpi->mb.e_mbd.mb_segment_tree_probs)); vpx_memset(cpi->mb.e_mbd.segment_feature_data, 0, sizeof(cpi->mb.e_mbd.segment_feature_data)); cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0; cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); vpx_memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); vpx_memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); set_default_lf_deltas(cpi); } void vp8_dealloc_compressor_data(VP8_COMP *cpi) { // Delete sementation map if (cpi->segmentation_map != 0) vpx_free(cpi->segmentation_map); cpi->segmentation_map = 0; if (cpi->active_map != 0) vpx_free(cpi->active_map); cpi->active_map = 0; // Delete first pass motion map if (cpi->fp_motion_map != 0) vpx_free(cpi->fp_motion_map); cpi->fp_motion_map = 0; vp8_de_alloc_frame_buffers(&cpi->common); vp8_yv12_de_alloc_frame_buffer(&cpi->last_frame_uf); vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source); #if VP8_TEMPORAL_ALT_REF vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer.source_buffer); #endif { int i; for (i = 0; i < MAX_LAG_BUFFERS; i++) vp8_yv12_de_alloc_frame_buffer(&cpi->src_buffer[i].source_buffer); cpi->source_buffer_count = 0; } vpx_free(cpi->tok); cpi->tok = 0; // Structure used to minitor GF useage if (cpi->gf_active_flags != 0) vpx_free(cpi->gf_active_flags); cpi->gf_active_flags = 0; if(cpi->mb.pip) vpx_free(cpi->mb.pip); cpi->mb.pip = 0; vpx_free(cpi->total_stats); vpx_free(cpi->this_frame_stats); } static void enable_segmentation(VP8_PTR ptr) { VP8_COMP *cpi = (VP8_COMP *)(ptr); // Set the appropriate feature bit cpi->mb.e_mbd.segmentation_enabled = 1; cpi->mb.e_mbd.update_mb_segmentation_map = 1; cpi->mb.e_mbd.update_mb_segmentation_data = 1; } static void disable_segmentation(VP8_PTR ptr) { VP8_COMP *cpi = (VP8_COMP *)(ptr); // Clear the appropriate feature bit cpi->mb.e_mbd.segmentation_enabled = 0; } // Valid values for a segment are 0 to 3 // Segmentation map is arrange as [Rows][Columns] static void set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map) { VP8_COMP *cpi = (VP8_COMP *)(ptr); // Copy in the new segmentation map vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols)); // Signal that the map should 
be updated. cpi->mb.e_mbd.update_mb_segmentation_map = 1; cpi->mb.e_mbd.update_mb_segmentation_data = 1; } // The values given for each segment can be either deltas (from the default value chosen for the frame) or absolute values. // // Valid range for abs values is (0-127 for MB_LVL_ALT_Q) , (0-63 for SEGMENT_ALT_LF) // Valid range for delta values are (+/-127 for MB_LVL_ALT_Q) , (+/-63 for SEGMENT_ALT_LF) // // abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use the absolute values given). // // static void set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned char abs_delta) { VP8_COMP *cpi = (VP8_COMP *)(ptr); cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta; vpx_memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data)); } static void segmentation_test_function(VP8_PTR ptr) { VP8_COMP *cpi = (VP8_COMP *)(ptr); unsigned char *seg_map; signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; // Create a temporary map for segmentation data. CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1)); // MB loop to set local segmentation map /*for ( i = 0; i < cpi->common.mb_rows; i++ ) { for ( j = 0; j < cpi->common.mb_cols; j++ ) { //seg_map[(i*cpi->common.mb_cols) + j] = (j % 2) + ((i%2)* 2); //if ( j < cpi->common.mb_cols/2 ) // Segment 1 around the edge else 0 if ( (i == 0) || (j == 0) || (i == (cpi->common.mb_rows-1)) || (j == (cpi->common.mb_cols-1)) ) seg_map[(i*cpi->common.mb_cols) + j] = 1; //else if ( (i < 2) || (j < 2) || (i > (cpi->common.mb_rows-3)) || (j > (cpi->common.mb_cols-3)) ) // seg_map[(i*cpi->common.mb_cols) + j] = 2; //else if ( (i < 5) || (j < 5) || (i > (cpi->common.mb_rows-6)) || (j > (cpi->common.mb_cols-6)) ) // seg_map[(i*cpi->common.mb_cols) + j] = 3; else seg_map[(i*cpi->common.mb_cols) + j] = 0; } }*/ // Set the segmentation Map set_segmentation_map(ptr, seg_map); // Activate segmentation. enable_segmentation(ptr); // Set up the quant segment data feature_data[MB_LVL_ALT_Q][0] = 0; feature_data[MB_LVL_ALT_Q][1] = 4; feature_data[MB_LVL_ALT_Q][2] = 0; feature_data[MB_LVL_ALT_Q][3] = 0; // Set up the loop segment data feature_data[MB_LVL_ALT_LF][0] = 0; feature_data[MB_LVL_ALT_LF][1] = 0; feature_data[MB_LVL_ALT_LF][2] = 0; feature_data[MB_LVL_ALT_LF][3] = 0; // Initialise the feature data structure // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1 set_segment_data(ptr, &feature_data[0][0], SEGMENT_DELTADATA); // Delete sementation map if (seg_map != 0) vpx_free(seg_map); seg_map = 0; } // A simple function to cyclically refresh the background at a lower Q static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) { unsigned char *seg_map; signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; int i; int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe; int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols; // Create a temporary map for segmentation data. 
CHECK_MEM_ERROR(seg_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1)); cpi->cyclic_refresh_q = Q; for (i = Q; i > 0; i--) { if (vp8_bits_per_mb[cpi->common.frame_type][i] >= ((vp8_bits_per_mb[cpi->common.frame_type][Q]*(Q + 128)) / 64)) //if ( vp8_bits_per_mb[cpi->common.frame_type][i] >= ((vp8_bits_per_mb[cpi->common.frame_type][Q]*((2*Q)+96))/64) ) { break; } } cpi->cyclic_refresh_q = i; // Only update for inter frames if (cpi->common.frame_type != KEY_FRAME) { // Cycle through the macro_block rows // MB loop to set local segmentation map for (i = cpi->cyclic_refresh_mode_index; i < mbs_in_frame; i++) { // If the MB is as a candidate for clean up then mark it for possible boost/refresh (segment 1) // The segment id may get reset to 0 later if the MB gets coded anything other than last frame 0,0 // as only (last frame 0,0) MBs are eligable for refresh : that is to say Mbs likely to be background blocks. if (cpi->cyclic_refresh_map[i] == 0) { seg_map[i] = 1; } else { seg_map[i] = 0; // Skip blocks that have been refreshed recently anyway. if (cpi->cyclic_refresh_map[i] < 0) //cpi->cyclic_refresh_map[i] = cpi->cyclic_refresh_map[i] / 16; cpi->cyclic_refresh_map[i]++; } if (block_count > 0) block_count--; else break; } // If we have gone through the frame reset to the start cpi->cyclic_refresh_mode_index = i; if (cpi->cyclic_refresh_mode_index >= mbs_in_frame) cpi->cyclic_refresh_mode_index = 0; } // Set the segmentation Map set_segmentation_map((VP8_PTR)cpi, seg_map); // Activate segmentation. enable_segmentation((VP8_PTR)cpi); // Set up the quant segment data feature_data[MB_LVL_ALT_Q][0] = 0; feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q); feature_data[MB_LVL_ALT_Q][2] = 0; feature_data[MB_LVL_ALT_Q][3] = 0; // Set up the loop segment data feature_data[MB_LVL_ALT_LF][0] = 0; feature_data[MB_LVL_ALT_LF][1] = lf_adjustment; feature_data[MB_LVL_ALT_LF][2] = 0; feature_data[MB_LVL_ALT_LF][3] = 0; // Initialise the feature data structure // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1 set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA); // Delete sementation map if (seg_map != 0) vpx_free(seg_map); seg_map = 0; } static void set_default_lf_deltas(VP8_COMP *cpi) { cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1; cpi->mb.e_mbd.mode_ref_lf_delta_update = 1; vpx_memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); vpx_memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); // Test of ref frame deltas cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2; cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0; cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2; cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2; cpi->mb.e_mbd.mode_lf_deltas[0] = 4; // BPRED cpi->mb.e_mbd.mode_lf_deltas[1] = -2; // Zero cpi->mb.e_mbd.mode_lf_deltas[2] = 2; // New mv cpi->mb.e_mbd.mode_lf_deltas[3] = 4; // Split mv } void vp8_set_speed_features(VP8_COMP *cpi) { SPEED_FEATURES *sf = &cpi->sf; int Mode = cpi->compressor_speed; int Speed = cpi->Speed; int i; VP8_COMMON *cm = &cpi->common; // Initialise default mode frequency sampling variables for (i = 0; i < MAX_MODES; i ++) { cpi->mode_check_freq[i] = 0; cpi->mode_test_hit_counts[i] = 0; cpi->mode_chosen_counts[i] = 0; } cpi->mbs_tested_so_far = 0; // best quality sf->RD = 1; sf->search_method = NSTEP; sf->improved_quant = 1; sf->improved_dct = 1; sf->auto_filter = 1; sf->recode_loop = 1; sf->quarter_pixel_search = 1; sf->half_pixel_search = 1; sf->full_freq[0] = 7; sf->full_freq[1] = 7; sf->min_fs_radius = 8; 
sf->max_fs_radius = 32; sf->iterative_sub_pixel = 1; sf->optimize_coefficients = 1; sf->first_step = 0; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; cpi->do_full[0] = 0; cpi->do_full[1] = 0; // default thresholds to 0 for (i = 0; i < MAX_MODES; i++) sf->thresh_mult[i] = 0; switch (Mode) { #if !(CONFIG_REALTIME_ONLY) case 0: // best quality mode sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_ZEROG ] = 0; sf->thresh_mult[THR_ZEROA ] = 0; sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_NEARESTG ] = 0; sf->thresh_mult[THR_NEARESTA ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_NEARG ] = 0; sf->thresh_mult[THR_NEARA ] = 0; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_V_PRED ] = 1000; sf->thresh_mult[THR_H_PRED ] = 1000; sf->thresh_mult[THR_B_PRED ] = 2000; sf->thresh_mult[THR_TM ] = 1000; sf->thresh_mult[THR_NEWMV ] = 1000; sf->thresh_mult[THR_NEWG ] = 1000; sf->thresh_mult[THR_NEWA ] = 1000; sf->thresh_mult[THR_SPLITMV ] = 2500; sf->thresh_mult[THR_SPLITG ] = 5000; sf->thresh_mult[THR_SPLITA ] = 5000; sf->full_freq[0] = 7; sf->full_freq[1] = 15; sf->first_step = 0; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; if (!(cpi->ref_frame_flags & VP8_LAST_FLAG)) { sf->thresh_mult[THR_NEWMV ] = INT_MAX; sf->thresh_mult[THR_NEARESTMV] = INT_MAX; sf->thresh_mult[THR_ZEROMV ] = INT_MAX; sf->thresh_mult[THR_NEARMV ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG)) { sf->thresh_mult[THR_NEARESTG ] = INT_MAX; sf->thresh_mult[THR_ZEROG ] = INT_MAX; sf->thresh_mult[THR_NEARG ] = INT_MAX; sf->thresh_mult[THR_NEWG ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_ALT_FLAG)) { sf->thresh_mult[THR_NEARESTA ] = INT_MAX; sf->thresh_mult[THR_ZEROA ] = INT_MAX; sf->thresh_mult[THR_NEARA ] = INT_MAX; sf->thresh_mult[THR_NEWA ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; } break; case 1: case 3: sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_V_PRED ] = 1000; sf->thresh_mult[THR_H_PRED ] = 1000; sf->thresh_mult[THR_B_PRED ] = 2500; sf->thresh_mult[THR_TM ] = 1000; sf->thresh_mult[THR_NEARESTG ] = 1000; sf->thresh_mult[THR_NEARESTA ] = 1000; sf->thresh_mult[THR_ZEROG ] = 1000; sf->thresh_mult[THR_ZEROA ] = 1000; sf->thresh_mult[THR_NEARG ] = 1000; sf->thresh_mult[THR_NEARA ] = 1000; sf->thresh_mult[THR_NEWMV ] = 1500; sf->thresh_mult[THR_NEWG ] = 1500; sf->thresh_mult[THR_NEWA ] = 1500; sf->thresh_mult[THR_SPLITMV ] = 5000; sf->thresh_mult[THR_SPLITG ] = 10000; sf->thresh_mult[THR_SPLITA ] = 10000; sf->full_freq[0] = 15; sf->full_freq[1] = 31; sf->first_step = 0; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; if (!(cpi->ref_frame_flags & VP8_LAST_FLAG)) { sf->thresh_mult[THR_NEWMV ] = INT_MAX; sf->thresh_mult[THR_NEARESTMV] = INT_MAX; sf->thresh_mult[THR_ZEROMV ] = INT_MAX; sf->thresh_mult[THR_NEARMV ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG)) { sf->thresh_mult[THR_NEARESTG ] = INT_MAX; sf->thresh_mult[THR_ZEROG ] = INT_MAX; sf->thresh_mult[THR_NEARG ] = INT_MAX; sf->thresh_mult[THR_NEWG ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_ALT_FLAG)) { sf->thresh_mult[THR_NEARESTA ] = INT_MAX; sf->thresh_mult[THR_ZEROA ] = INT_MAX; sf->thresh_mult[THR_NEARA ] = INT_MAX; sf->thresh_mult[THR_NEWA ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; } if (Speed > 0) { // Disable coefficient 
optimization above speed 0 sf->optimize_coefficients = 0; cpi->mode_check_freq[THR_SPLITG] = 4; cpi->mode_check_freq[THR_SPLITA] = 4; cpi->mode_check_freq[THR_SPLITMV] = 2; sf->thresh_mult[THR_TM ] = 1500; sf->thresh_mult[THR_V_PRED ] = 1500; sf->thresh_mult[THR_H_PRED ] = 1500; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 10000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 1500; sf->thresh_mult[THR_ZEROG ] = 1500; sf->thresh_mult[THR_NEARG ] = 1500; sf->thresh_mult[THR_NEWG ] = 2000; sf->thresh_mult[THR_SPLITG ] = 20000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 1500; sf->thresh_mult[THR_ZEROA ] = 1500; sf->thresh_mult[THR_NEARA ] = 1500; sf->thresh_mult[THR_NEWA ] = 2000; sf->thresh_mult[THR_SPLITA ] = 20000; } sf->improved_quant = 0; sf->improved_dct = 0; sf->first_step = 1; sf->max_step_search_steps = MAX_MVSEARCH_STEPS; } if (Speed > 1) { cpi->mode_check_freq[THR_SPLITG] = 15; cpi->mode_check_freq[THR_SPLITA] = 15; cpi->mode_check_freq[THR_SPLITMV] = 7; sf->thresh_mult[THR_TM ] = 2000; sf->thresh_mult[THR_V_PRED ] = 2000; sf->thresh_mult[THR_H_PRED ] = 2000; sf->thresh_mult[THR_B_PRED ] = 7500; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 25000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 2500; sf->thresh_mult[THR_SPLITG ] = 50000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 2500; sf->thresh_mult[THR_SPLITA ] = 50000; } // Only do recode loop on key frames and golden frames sf->recode_loop = 2; sf->full_freq[0] = 31; sf->full_freq[1] = 63; } if (Speed > 2) { sf->auto_filter = 0; // Faster selection of loop filter cpi->mode_check_freq[THR_V_PRED] = 2; cpi->mode_check_freq[THR_H_PRED] = 2; cpi->mode_check_freq[THR_B_PRED] = 2; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; cpi->mode_check_freq[THR_NEWA] = 4; } sf->thresh_mult[THR_SPLITA ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; sf->full_freq[0] = 63; sf->full_freq[1] = 127; } if (Speed > 3) { cpi->mode_check_freq[THR_V_PRED] = 0; cpi->mode_check_freq[THR_H_PRED] = 0; cpi->mode_check_freq[THR_B_PRED] = 0; cpi->mode_check_freq[THR_NEARG] = 0; cpi->mode_check_freq[THR_NEWG] = 0; cpi->mode_check_freq[THR_NEARA] = 0; cpi->mode_check_freq[THR_NEWA] = 0; sf->auto_filter = 1; sf->recode_loop = 0; // recode loop off sf->RD = 0; // Turn rd off sf->full_freq[0] = INT_MAX; sf->full_freq[1] = INT_MAX; } if (Speed > 4) { sf->auto_filter = 0; // Faster selection of loop filter cpi->mode_check_freq[THR_V_PRED] = 2; cpi->mode_check_freq[THR_H_PRED] = 2; cpi->mode_check_freq[THR_B_PRED] = 2; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; cpi->mode_check_freq[THR_NEWA] = 4; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; 
sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 4000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 4000; } } break; #endif case 2: sf->optimize_coefficients = 0; sf->recode_loop = 0; sf->auto_filter = 1; sf->iterative_sub_pixel = 1; sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_TM ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_V_PRED ] = 1000; sf->thresh_mult[THR_H_PRED ] = 1000; sf->thresh_mult[THR_B_PRED ] = 2500; sf->thresh_mult[THR_NEARESTG ] = 1000; sf->thresh_mult[THR_ZEROG ] = 1000; sf->thresh_mult[THR_NEARG ] = 1000; sf->thresh_mult[THR_NEARESTA ] = 1000; sf->thresh_mult[THR_ZEROA ] = 1000; sf->thresh_mult[THR_NEARA ] = 1000; sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_NEWG ] = 2000; sf->thresh_mult[THR_NEWA ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 5000; sf->thresh_mult[THR_SPLITG ] = 10000; sf->thresh_mult[THR_SPLITA ] = 10000; sf->full_freq[0] = 15; sf->full_freq[1] = 31; sf->search_method = NSTEP; if (!(cpi->ref_frame_flags & VP8_LAST_FLAG)) { sf->thresh_mult[THR_NEWMV ] = INT_MAX; sf->thresh_mult[THR_NEARESTMV] = INT_MAX; sf->thresh_mult[THR_ZEROMV ] = INT_MAX; sf->thresh_mult[THR_NEARMV ] = INT_MAX; sf->thresh_mult[THR_SPLITMV ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_GOLD_FLAG)) { sf->thresh_mult[THR_NEARESTG ] = INT_MAX; sf->thresh_mult[THR_ZEROG ] = INT_MAX; sf->thresh_mult[THR_NEARG ] = INT_MAX; sf->thresh_mult[THR_NEWG ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; } if (!(cpi->ref_frame_flags & VP8_ALT_FLAG)) { sf->thresh_mult[THR_NEARESTA ] = INT_MAX; sf->thresh_mult[THR_ZEROA ] = INT_MAX; sf->thresh_mult[THR_NEARA ] = INT_MAX; sf->thresh_mult[THR_NEWA ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; } if (Speed > 0) { cpi->mode_check_freq[THR_SPLITG] = 4; cpi->mode_check_freq[THR_SPLITA] = 4; cpi->mode_check_freq[THR_SPLITMV] = 2; sf->thresh_mult[THR_DC ] = 0; sf->thresh_mult[THR_TM ] = 1000; sf->thresh_mult[THR_V_PRED ] = 2000; sf->thresh_mult[THR_H_PRED ] = 2000; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEARESTMV] = 0; sf->thresh_mult[THR_ZEROMV ] = 0; sf->thresh_mult[THR_NEARMV ] = 0; sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 10000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 1000; sf->thresh_mult[THR_ZEROG ] = 1000; sf->thresh_mult[THR_NEARG ] = 1000; sf->thresh_mult[THR_NEWG ] = 2000; sf->thresh_mult[THR_SPLITG ] = 20000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 1000; sf->thresh_mult[THR_ZEROA ] = 1000; sf->thresh_mult[THR_NEARA ] = 1000; sf->thresh_mult[THR_NEWA ] = 2000; sf->thresh_mult[THR_SPLITA ] = 20000; } sf->improved_quant = 0; sf->improved_dct = 0; } if (Speed > 1) { cpi->mode_check_freq[THR_SPLITMV] = 7; cpi->mode_check_freq[THR_SPLITG] = 15; cpi->mode_check_freq[THR_SPLITA] = 15; sf->thresh_mult[THR_TM ] = 2000; sf->thresh_mult[THR_V_PRED ] = 2000; sf->thresh_mult[THR_H_PRED ] = 2000; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV ] = 2000; sf->thresh_mult[THR_SPLITMV ] = 25000; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 2500; 
sf->thresh_mult[THR_SPLITG ] = 50000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 2500; sf->thresh_mult[THR_SPLITA ] = 50000; } sf->full_freq[0] = 31; sf->full_freq[1] = 63; } if (Speed > 2) { sf->auto_filter = 0; // Faster selection of loop filter cpi->mode_check_freq[THR_V_PRED] = 2; cpi->mode_check_freq[THR_H_PRED] = 2; cpi->mode_check_freq[THR_B_PRED] = 2; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; cpi->mode_check_freq[THR_NEWA] = 4; } sf->thresh_mult[THR_SPLITMV ] = INT_MAX; sf->thresh_mult[THR_SPLITG ] = INT_MAX; sf->thresh_mult[THR_SPLITA ] = INT_MAX; sf->full_freq[0] = 63; sf->full_freq[1] = 127; } if (Speed > 3) { sf->RD = 0; sf->full_freq[0] = INT_MAX; sf->full_freq[1] = INT_MAX; sf->auto_filter = 1; } if (Speed > 4) { sf->auto_filter = 0; // Faster selection of loop filter #if CONFIG_REALTIME_ONLY sf->search_method = HEX; #else sf->search_method = DIAMOND; #endif cpi->mode_check_freq[THR_V_PRED] = 4; cpi->mode_check_freq[THR_H_PRED] = 4; cpi->mode_check_freq[THR_B_PRED] = 4; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_NEARG] = 2; cpi->mode_check_freq[THR_NEWG] = 4; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_NEARA] = 2; cpi->mode_check_freq[THR_NEWA] = 4; } sf->thresh_mult[THR_TM ] = 2000; sf->thresh_mult[THR_B_PRED ] = 5000; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEARESTG ] = 2000; sf->thresh_mult[THR_ZEROG ] = 2000; sf->thresh_mult[THR_NEARG ] = 2000; sf->thresh_mult[THR_NEWG ] = 4000; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEARESTA ] = 2000; sf->thresh_mult[THR_ZEROA ] = 2000; sf->thresh_mult[THR_NEARA ] = 2000; sf->thresh_mult[THR_NEWA ] = 4000; } } if (Speed > 5) { // Disable split MB intra prediction mode sf->thresh_mult[THR_B_PRED] = INT_MAX; } if (Speed > 6) { unsigned int i, sum = 0; unsigned int total_mbs = cm->MBs; int thresh; int total_skip; int min = 2000; sf->iterative_sub_pixel = 0; if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout; min >>= 7; for (i = 0; i < min; i++) { sum += cpi->error_bins[i]; } total_skip = sum; sum = 0; // i starts from 2 to make sure thresh started from 2048 for (; i < 1024; i++) { sum += cpi->error_bins[i]; if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip)) break; } i--; thresh = (i << 7); if (thresh < 2000) thresh = 2000; if (cpi->ref_frame_flags & VP8_LAST_FLAG) { sf->thresh_mult[THR_NEWMV] = thresh; sf->thresh_mult[THR_NEARESTMV ] = thresh >> 1; sf->thresh_mult[THR_NEARMV ] = thresh >> 1; } if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { sf->thresh_mult[THR_NEWG] = thresh << 1; sf->thresh_mult[THR_NEARESTG ] = thresh; sf->thresh_mult[THR_NEARG ] = thresh; } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { sf->thresh_mult[THR_NEWA] = thresh << 1; sf->thresh_mult[THR_NEARESTA ] = thresh; sf->thresh_mult[THR_NEARA ] = thresh; } // Disable other intra prediction modes sf->thresh_mult[THR_TM] = INT_MAX; sf->thresh_mult[THR_V_PRED] = INT_MAX; sf->thresh_mult[THR_H_PRED] = INT_MAX; } if (Speed > 8) { sf->quarter_pixel_search = 0; } if (Speed > 9) { int Tmp = cpi->Speed - 8; if (Tmp > 4) Tmp = 4; if (cpi->ref_frame_flags & VP8_GOLD_FLAG) { cpi->mode_check_freq[THR_ZEROG] = 1 << (Tmp - 1); 
cpi->mode_check_freq[THR_NEARESTG] = 1 << (Tmp - 1); cpi->mode_check_freq[THR_NEARG] = 1 << Tmp; cpi->mode_check_freq[THR_NEWG] = 1 << (Tmp + 1); } if (cpi->ref_frame_flags & VP8_ALT_FLAG) { cpi->mode_check_freq[THR_ZEROA] = 1 << (Tmp - 1); cpi->mode_check_freq[THR_NEARESTA] = 1 << (Tmp - 1); cpi->mode_check_freq[THR_NEARA] = 1 << Tmp; cpi->mode_check_freq[THR_NEWA] = 1 << (Tmp + 1); } cpi->mode_check_freq[THR_NEWMV] = 1 << (Tmp - 1); } cm->filter_type = NORMAL_LOOPFILTER; if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER; if (Speed >= 15) { sf->half_pixel_search = 0; // This has a big hit on quality. Last resort } vpx_memset(cpi->error_bins, 0, sizeof(cpi->error_bins)); }; if (cpi->sf.search_method == NSTEP) { vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride); } else if (cpi->sf.search_method == DIAMOND) { vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride); } if (cpi->sf.improved_dct) { cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4); cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4); } else { cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4); cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4); } cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4); if (cpi->sf.improved_quant) { cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb); } else { cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb); } #if CONFIG_RUNTIME_CPU_DETECT cpi->mb.e_mbd.rtcd = &cpi->common.rtcd; #endif if (cpi->sf.iterative_sub_pixel == 1) { cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively; } else if (cpi->sf.quarter_pixel_search) { cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step; } else if (cpi->sf.half_pixel_search) { cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step; } else { cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step; } if (cpi->sf.optimize_coefficients == 1) cpi->mb.optimize = 1 + cpi->is_next_src_alt_ref; else cpi->mb.optimize = 0; if (cpi->common.full_pixel) cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step; #ifdef SPEEDSTATS frames_at_speed[cpi->Speed]++; #endif } static void alloc_raw_frame_buffers(VP8_COMP *cpi) { int i, buffers; buffers = cpi->oxcf.lag_in_frames; if (buffers > MAX_LAG_BUFFERS) buffers = MAX_LAG_BUFFERS; if (buffers < 1) buffers = 1; for (i = 0; i < buffers; i++) if (vp8_yv12_alloc_frame_buffer(&cpi->src_buffer[i].source_buffer, cpi->oxcf.Width, cpi->oxcf.Height, 16)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate lag buffer"); #if VP8_TEMPORAL_ALT_REF if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer.source_buffer, cpi->oxcf.Width, cpi->oxcf.Height, 16)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate altref buffer"); #endif cpi->source_buffer_count = 0; } static int vp8_alloc_partition_data(VP8_COMP *cpi) { cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1), sizeof(PARTITION_INFO)); if(!cpi->mb.pip) return ALLOC_FAILURE; cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1; return 0; } void vp8_alloc_compressor_data(VP8_COMP *cpi) { VP8_COMMON *cm = & cpi->common; int width = cm->Width; int height = cm->Height; if (vp8_alloc_frame_buffers(cm, width, height)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate frame buffers"); if (vp8_alloc_partition_data(cpi)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, 
"Failed to allocate partition data"); if ((width & 0xf) != 0) width += 16 - (width & 0xf); if ((height & 0xf) != 0) height += 16 - (height & 0xf); if (vp8_yv12_alloc_frame_buffer(&cpi->last_frame_uf, width, height, VP8BORDERINPIXELS)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate last frame buffer"); if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height, 16)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate scaled source buffer"); if (cpi->tok != 0) vpx_free(cpi->tok); { unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16; CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok))); } // Data used for real time vc mode to see if gf needs refreshing cpi->inter_zz_count = 0; cpi->gf_bad_count = 0; cpi->gf_update_recommended = 0; // Structures used to minitor GF usage if (cpi->gf_active_flags != 0) vpx_free(cpi->gf_active_flags); CHECK_MEM_ERROR(cpi->gf_active_flags, vpx_calloc(1, cm->mb_rows * cm->mb_cols)); cpi->gf_active_count = cm->mb_rows * cm->mb_cols; cpi->total_stats = vpx_calloc(1, vp8_firstpass_stats_sz(cpi->common.MBs)); cpi->this_frame_stats = vpx_calloc(1, vp8_firstpass_stats_sz(cpi->common.MBs)); if(!cpi->total_stats || !cpi->this_frame_stats) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate firstpass stats"); } // Quant MOD static const int q_trans[] = { 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79, 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127, }; int vp8_reverse_trans(int x) { int i; for (i = 0; i < 64; i++) if (q_trans[i] >= x) return i; return 63; }; void vp8_new_frame_rate(VP8_COMP *cpi, double framerate) { if(framerate < .1) framerate = 30; cpi->oxcf.frame_rate = framerate; cpi->output_frame_rate = cpi->oxcf.frame_rate; cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth / cpi->output_frame_rate); cpi->av_per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth / cpi->output_frame_rate); cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100); cpi->max_gf_interval = (int)(cpi->output_frame_rate / 2) + 2; //cpi->max_gf_interval = (int)(cpi->output_frame_rate * 2 / 3) + 1; //cpi->max_gf_interval = 24; if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12; // Special conditions when altr ref frame enabled in lagged compress mode if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) { if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1; } } static int rescale(int val, int num, int denom) { int64_t llnum = num; int64_t llden = denom; int64_t llval = val; return llval * llnum / llden; } void vp8_init_config(VP8_PTR ptr, VP8_CONFIG *oxcf) { VP8_COMP *cpi = (VP8_COMP *)(ptr); VP8_COMMON *cm = &cpi->common; if (!cpi) return; cpi->auto_gold = 1; cpi->auto_adjust_gold_quantizer = 1; cpi->goldquantizer = 1; cpi->goldfreq = 7; cpi->auto_adjust_key_quantizer = 1; cpi->keyquantizer = 1; cm->version = oxcf->Version; vp8_setup_version(cm); if (oxcf == 0) { cpi->pass = 0; cpi->auto_worst_q = 0; cpi->oxcf.best_allowed_q = MINQ; cpi->oxcf.worst_allowed_q = MAXQ; cpi->oxcf.end_usage = USAGE_STREAM_FROM_SERVER; cpi->oxcf.starting_buffer_level = 4000; cpi->oxcf.optimal_buffer_level = 5000; cpi->oxcf.maximum_buffer_size = 6000; cpi->oxcf.under_shoot_pct = 90; cpi->oxcf.allow_df = 0; 
cpi->oxcf.drop_frames_water_mark = 20; cpi->oxcf.allow_spatial_resampling = 0; cpi->oxcf.resample_down_water_mark = 40; cpi->oxcf.resample_up_water_mark = 60; cpi->oxcf.fixed_q = cpi->interquantizer; cpi->filter_type = NORMAL_LOOPFILTER; if (cm->simpler_lpf) cpi->filter_type = SIMPLE_LOOPFILTER; cpi->compressor_speed = 1; cpi->horiz_scale = 0; cpi->vert_scale = 0; cpi->oxcf.two_pass_vbrbias = 50; cpi->oxcf.two_pass_vbrmax_section = 400; cpi->oxcf.two_pass_vbrmin_section = 0; cpi->oxcf.Sharpness = 0; cpi->oxcf.noise_sensitivity = 0; } else cpi->oxcf = *oxcf; switch (cpi->oxcf.Mode) { case MODE_REALTIME: cpi->pass = 0; cpi->compressor_speed = 2; if (cpi->oxcf.cpu_used < -16) { cpi->oxcf.cpu_used = -16; } if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16; break; #if !(CONFIG_REALTIME_ONLY) case MODE_GOODQUALITY: cpi->pass = 0; cpi->compressor_speed = 1; if (cpi->oxcf.cpu_used < -5) { cpi->oxcf.cpu_used = -5; } if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5; break; case MODE_BESTQUALITY: cpi->pass = 0; cpi->compressor_speed = 0; break; case MODE_FIRSTPASS: cpi->pass = 1; cpi->compressor_speed = 1; break; case MODE_SECONDPASS: cpi->pass = 2; cpi->compressor_speed = 1; if (cpi->oxcf.cpu_used < -5) { cpi->oxcf.cpu_used = -5; } if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5; break; case MODE_SECONDPASS_BEST: cpi->pass = 2; cpi->compressor_speed = 0; break; #endif } if (cpi->pass == 0) cpi->auto_worst_q = 1; cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q]; cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q]; if (oxcf->fixed_q >= 0) { if (oxcf->worst_allowed_q < 0) cpi->oxcf.fixed_q = q_trans[0]; else cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q]; if (oxcf->alt_q < 0) cpi->oxcf.alt_q = q_trans[0]; else cpi->oxcf.alt_q = q_trans[oxcf->alt_q]; if (oxcf->key_q < 0) cpi->oxcf.key_q = q_trans[0]; else cpi->oxcf.key_q = q_trans[oxcf->key_q]; if (oxcf->gold_q < 0) cpi->oxcf.gold_q = q_trans[0]; else cpi->oxcf.gold_q = q_trans[oxcf->gold_q]; } cpi->baseline_gf_interval = cpi->oxcf.alt_freq ? 
cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL; cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG; //cpi->use_golden_frame_only = 0; //cpi->use_last_frame_only = 0; cm->refresh_golden_frame = 0; cm->refresh_last_frame = 1; cm->refresh_entropy_probs = 1; if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) cm->multi_token_partition = (TOKEN_PARTITION) cpi->oxcf.token_partitions; setup_features(cpi); { int i; for (i = 0; i < MAX_MB_SEGMENTS; i++) cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout; } // At the moment the first order values may not be > MAXQ if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ; // local file playback mode == really big buffer if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) { cpi->oxcf.starting_buffer_level = 60000; cpi->oxcf.optimal_buffer_level = 60000; cpi->oxcf.maximum_buffer_size = 240000; } // Convert target bandwidth from Kbit/s to Bit/s cpi->oxcf.target_bandwidth *= 1000; cpi->oxcf.starting_buffer_level = rescale(cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000); if (cpi->oxcf.optimal_buffer_level == 0) cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8; else cpi->oxcf.optimal_buffer_level = rescale(cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000); if (cpi->oxcf.maximum_buffer_size == 0) cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8; else cpi->oxcf.maximum_buffer_size = rescale(cpi->oxcf.maximum_buffer_size, cpi->oxcf.target_bandwidth, 1000); cpi->buffer_level = cpi->oxcf.starting_buffer_level; cpi->bits_off_target = cpi->oxcf.starting_buffer_level; vp8_new_frame_rate(cpi, cpi->oxcf.frame_rate); cpi->worst_quality = cpi->oxcf.worst_allowed_q; cpi->active_worst_quality = cpi->oxcf.worst_allowed_q; cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q; cpi->best_quality = cpi->oxcf.best_allowed_q; cpi->active_best_quality = cpi->oxcf.best_allowed_q; cpi->buffered_mode = (cpi->oxcf.optimal_buffer_level > 0) ? 
TRUE : FALSE; cpi->rolling_target_bits = cpi->av_per_frame_bandwidth; cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth; cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth; cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth; cpi->total_actual_bits = 0; cpi->total_target_vs_actual = 0; // Only allow dropped frames in buffered mode cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode; cm->filter_type = (LOOPFILTERTYPE) cpi->filter_type; if (!cm->use_bilinear_mc_filter) cm->mcomp_filter_type = SIXTAP; else cm->mcomp_filter_type = BILINEAR; cpi->target_bandwidth = cpi->oxcf.target_bandwidth; cm->Width = cpi->oxcf.Width ; cm->Height = cpi->oxcf.Height ; cpi->intra_frame_target = (4 * (cm->Width + cm->Height) / 15) * 1000; // As per VP8 cm->horiz_scale = cpi->horiz_scale; cm->vert_scale = cpi->vert_scale ; // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7; cm->sharpness_level = cpi->oxcf.Sharpness; if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) { int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); Scale2Ratio(cm->horiz_scale, &hr, &hs); Scale2Ratio(cm->vert_scale, &vr, &vs); // always go to the next whole number cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs; cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs; } if (((cm->Width + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_width || ((cm->Height + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_height || cm->yv12_fb[cm->lst_fb_idx].y_width == 0) { alloc_raw_frame_buffers(cpi); vp8_alloc_compressor_data(cpi); } // Clamp KF frame size to quarter of data rate if (cpi->intra_frame_target > cpi->target_bandwidth >> 2) cpi->intra_frame_target = cpi->target_bandwidth >> 2; if (cpi->oxcf.fixed_q >= 0) { cpi->last_q[0] = cpi->oxcf.fixed_q; cpi->last_q[1] = cpi->oxcf.fixed_q; } cpi->Speed = cpi->oxcf.cpu_used; // force to allowlag to 0 if lag_in_frames is 0; if (cpi->oxcf.lag_in_frames == 0) { cpi->oxcf.allow_lag = 0; } // Limit on lag buffers as these are not currently dynamically allocated else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS; // YX Temp cpi->last_alt_ref_sei = -1; cpi->is_src_frame_alt_ref = 0; cpi->is_next_src_alt_ref = 0; #if 0 // Experimental RD Code cpi->frame_distortion = 0; cpi->last_frame_distortion = 0; #endif #if VP8_TEMPORAL_ALT_REF cpi->use_weighted_temporal_filter = 0; { int i; cpi->fixed_divide[0] = 0; for (i = 1; i < 512; i++) cpi->fixed_divide[i] = 0x80000 / i; } #endif } /* * This function needs more clean up, i.e. be more tuned torwards * change_config rather than init_config !!!!!!!!!!!!!!!! 
* YX - 5/28/2009 * */ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) { VP8_COMP *cpi = (VP8_COMP *)(ptr); VP8_COMMON *cm = &cpi->common; if (!cpi) return; if (!oxcf) return; if (cm->version != oxcf->Version) { cm->version = oxcf->Version; vp8_setup_version(cm); } cpi->oxcf = *oxcf; switch (cpi->oxcf.Mode) { case MODE_REALTIME: cpi->pass = 0; cpi->compressor_speed = 2; if (cpi->oxcf.cpu_used < -16) { cpi->oxcf.cpu_used = -16; } if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16; break; #if !(CONFIG_REALTIME_ONLY) case MODE_GOODQUALITY: cpi->pass = 0; cpi->compressor_speed = 1; if (cpi->oxcf.cpu_used < -5) { cpi->oxcf.cpu_used = -5; } if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5; break; case MODE_BESTQUALITY: cpi->pass = 0; cpi->compressor_speed = 0; break; case MODE_FIRSTPASS: cpi->pass = 1; cpi->compressor_speed = 1; break; case MODE_SECONDPASS: cpi->pass = 2; cpi->compressor_speed = 1; if (cpi->oxcf.cpu_used < -5) { cpi->oxcf.cpu_used = -5; } if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5; break; case MODE_SECONDPASS_BEST: cpi->pass = 2; cpi->compressor_speed = 0; break; #endif } if (cpi->pass == 0) cpi->auto_worst_q = 1; cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q]; cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q]; if (oxcf->fixed_q >= 0) { if (oxcf->worst_allowed_q < 0) cpi->oxcf.fixed_q = q_trans[0]; else cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q]; if (oxcf->alt_q < 0) cpi->oxcf.alt_q = q_trans[0]; else cpi->oxcf.alt_q = q_trans[oxcf->alt_q]; if (oxcf->key_q < 0) cpi->oxcf.key_q = q_trans[0]; else cpi->oxcf.key_q = q_trans[oxcf->key_q]; if (oxcf->gold_q < 0) cpi->oxcf.gold_q = q_trans[0]; else cpi->oxcf.gold_q = q_trans[oxcf->gold_q]; } cpi->baseline_gf_interval = cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL; cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG; //cpi->use_golden_frame_only = 0; //cpi->use_last_frame_only = 0; cm->refresh_golden_frame = 0; cm->refresh_last_frame = 1; cm->refresh_entropy_probs = 1; if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) cm->multi_token_partition = (TOKEN_PARTITION) cpi->oxcf.token_partitions; setup_features(cpi); { int i; for (i = 0; i < MAX_MB_SEGMENTS; i++) cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout; } // At the moment the first order values may not be > MAXQ if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ; // local file playback mode == really big buffer if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) { cpi->oxcf.starting_buffer_level = 60000; cpi->oxcf.optimal_buffer_level = 60000; cpi->oxcf.maximum_buffer_size = 240000; } // Convert target bandwidth from Kbit/s to Bit/s cpi->oxcf.target_bandwidth *= 1000; cpi->oxcf.starting_buffer_level = rescale(cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000); if (cpi->oxcf.optimal_buffer_level == 0) cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8; else cpi->oxcf.optimal_buffer_level = rescale(cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000); if (cpi->oxcf.maximum_buffer_size == 0) cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8; else cpi->oxcf.maximum_buffer_size = rescale(cpi->oxcf.maximum_buffer_size, cpi->oxcf.target_bandwidth, 1000); cpi->buffer_level = cpi->oxcf.starting_buffer_level; cpi->bits_off_target = cpi->oxcf.starting_buffer_level; vp8_new_frame_rate(cpi, cpi->oxcf.frame_rate); cpi->worst_quality = cpi->oxcf.worst_allowed_q; cpi->active_worst_quality = cpi->oxcf.worst_allowed_q; 
cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q; cpi->best_quality = cpi->oxcf.best_allowed_q; cpi->active_best_quality = cpi->oxcf.best_allowed_q; cpi->buffered_mode = (cpi->oxcf.optimal_buffer_level > 0) ? TRUE : FALSE; cpi->rolling_target_bits = cpi->av_per_frame_bandwidth; cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth; cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth; cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth; cpi->total_actual_bits = 0; cpi->total_target_vs_actual = 0; // Only allow dropped frames in buffered mode cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode; cm->filter_type = (LOOPFILTERTYPE) cpi->filter_type; if (!cm->use_bilinear_mc_filter) cm->mcomp_filter_type = SIXTAP; else cm->mcomp_filter_type = BILINEAR; cpi->target_bandwidth = cpi->oxcf.target_bandwidth; cm->Width = cpi->oxcf.Width ; cm->Height = cpi->oxcf.Height ; cm->horiz_scale = cpi->horiz_scale; cm->vert_scale = cpi->vert_scale ; cpi->intra_frame_target = (4 * (cm->Width + cm->Height) / 15) * 1000; // As per VP8 // VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7; cm->sharpness_level = cpi->oxcf.Sharpness; if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) { int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); Scale2Ratio(cm->horiz_scale, &hr, &hs); Scale2Ratio(cm->vert_scale, &vr, &vs); // always go to the next whole number cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs; cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs; } if (((cm->Width + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_width || ((cm->Height + 15) & 0xfffffff0) != cm->yv12_fb[cm->lst_fb_idx].y_height || cm->yv12_fb[cm->lst_fb_idx].y_width == 0) { alloc_raw_frame_buffers(cpi); vp8_alloc_compressor_data(cpi); } // Clamp KF frame size to quarter of data rate if (cpi->intra_frame_target > cpi->target_bandwidth >> 2) cpi->intra_frame_target = cpi->target_bandwidth >> 2; if (cpi->oxcf.fixed_q >= 0) { cpi->last_q[0] = cpi->oxcf.fixed_q; cpi->last_q[1] = cpi->oxcf.fixed_q; } cpi->Speed = cpi->oxcf.cpu_used; // force to allowlag to 0 if lag_in_frames is 0; if (cpi->oxcf.lag_in_frames == 0) { cpi->oxcf.allow_lag = 0; } // Limit on lag buffers as these are not currently dynamically allocated else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS; // YX Temp cpi->last_alt_ref_sei = -1; cpi->is_src_frame_alt_ref = 0; cpi->is_next_src_alt_ref = 0; #if 0 // Experimental RD Code cpi->frame_distortion = 0; cpi->last_frame_distortion = 0; #endif } #define M_LOG2_E 0.693147180559945309417 #define log2f(x) (log (x) / (float) M_LOG2_E) static void cal_mvsadcosts(int *mvsadcost[2]) { int i = 1; mvsadcost [0] [0] = 300; mvsadcost [1] [0] = 300; do { double z = 256 * (2 * (log2f(2 * i) + .6)); mvsadcost [0][i] = (int) z; mvsadcost [1][i] = (int) z; mvsadcost [0][-i] = (int) z; mvsadcost [1][-i] = (int) z; } while (++i <= mv_max); } VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) { int i; volatile union { VP8_COMP *cpi; VP8_PTR ptr; } ctx; VP8_COMP *cpi; VP8_COMMON *cm; cpi = ctx.cpi = vpx_memalign(32, sizeof(VP8_COMP)); // Check that the CPI instance is valid if (!cpi) return 0; cm = &cpi->common; vpx_memset(cpi, 0, sizeof(VP8_COMP)); if (setjmp(cm->error.jmp)) { VP8_PTR ptr = ctx.ptr; ctx.cpi->common.error.setjmp = 0; vp8_remove_compressor(&ptr); return 0; } cpi->common.error.setjmp = 1; CHECK_MEM_ERROR(cpi->rdtok, vpx_calloc(256 
* 3 / 2, sizeof(TOKENEXTRA))); CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1)); vp8_create_common(&cpi->common); vp8_cmachine_specific_config(cpi); vp8_init_config((VP8_PTR)cpi, oxcf); memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob)); cpi->common.current_video_frame = 0; cpi->kf_overspend_bits = 0; cpi->kf_bitrate_adjustment = 0; cpi->frames_till_gf_update_due = 0; cpi->gf_overspend_bits = 0; cpi->non_gf_bitrate_adjustment = 0; cpi->prob_last_coded = 128; cpi->prob_gf_coded = 128; cpi->prob_intra_coded = 63; // Prime the recent reference frame useage counters. // Hereafter they will be maintained as a sort of moving average cpi->recent_ref_frame_usage[INTRA_FRAME] = 1; cpi->recent_ref_frame_usage[LAST_FRAME] = 1; cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1; cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1; // Set reference frame sign bias for ALTREF frame to 1 (for now) cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1; cpi->gf_decay_rate = 0; cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL; cpi->gold_is_last = 0 ; cpi->alt_is_last = 0 ; cpi->gold_is_alt = 0 ; // Create the encoder segmentation map and set all entries to 0 CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1)); CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1)); vpx_memset(cpi->active_map , 1, (cpi->common.mb_rows * cpi->common.mb_cols)); cpi->active_map_enabled = 0; // Create the first pass motion map structure and set to 0 // Allocate space for maximum of 15 buffers CHECK_MEM_ERROR(cpi->fp_motion_map, vpx_calloc(15*cpi->common.MBs, 1)); #if 0 // Experimental code for lagged and one pass // Initialise one_pass GF frames stats // Update stats used for GF selection if (cpi->pass == 0) { cpi->one_pass_frame_index = 0; for (i = 0; i < MAX_LAG_BUFFERS; i++) { cpi->one_pass_frame_stats[i].frames_so_far = 0; cpi->one_pass_frame_stats[i].frame_intra_error = 0.0; cpi->one_pass_frame_stats[i].frame_coded_error = 0.0; cpi->one_pass_frame_stats[i].frame_pcnt_inter = 0.0; cpi->one_pass_frame_stats[i].frame_pcnt_motion = 0.0; cpi->one_pass_frame_stats[i].frame_mvr = 0.0; cpi->one_pass_frame_stats[i].frame_mvr_abs = 0.0; cpi->one_pass_frame_stats[i].frame_mvc = 0.0; cpi->one_pass_frame_stats[i].frame_mvc_abs = 0.0; } } #endif // Should we use the cyclic refresh method. // Currently this is tied to error resilliant mode cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode; cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 40; cpi->cyclic_refresh_mode_index = 0; cpi->cyclic_refresh_q = 32; if (cpi->cyclic_refresh_mode_enabled) { CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1)); } else cpi->cyclic_refresh_map = (signed char *) NULL; // Test function for segmentation //segmentation_test_function((VP8_PTR) cpi); #ifdef ENTROPY_STATS init_context_counters(); #endif cpi->frames_since_key = 8; // Give a sensible default for the first frame. 
cpi->key_frame_frequency = cpi->oxcf.key_freq; cpi->source_alt_ref_pending = FALSE; cpi->source_alt_ref_active = FALSE; cpi->common.refresh_alt_ref_frame = 0; cpi->b_calculate_psnr = CONFIG_PSNR; #if CONFIG_PSNR cpi->b_calculate_ssimg = 0; cpi->count = 0; cpi->bytes = 0; if (cpi->b_calculate_psnr) { cpi->total_sq_error = 0.0; cpi->total_sq_error2 = 0.0; cpi->total_y = 0.0; cpi->total_u = 0.0; cpi->total_v = 0.0; cpi->total = 0.0; cpi->totalp_y = 0.0; cpi->totalp_u = 0.0; cpi->totalp_v = 0.0; cpi->totalp = 0.0; cpi->tot_recode_hits = 0; cpi->summed_quality = 0; cpi->summed_weights = 0; } if (cpi->b_calculate_ssimg) { cpi->total_ssimg_y = 0; cpi->total_ssimg_u = 0; cpi->total_ssimg_v = 0; cpi->total_ssimg_all = 0; } #ifndef LLONG_MAX #define LLONG_MAX 9223372036854775807LL #endif cpi->first_time_stamp_ever = LLONG_MAX; #endif cpi->frames_till_gf_update_due = 0; cpi->key_frame_count = 1; cpi->tot_key_frame_bits = 0; cpi->ni_av_qi = cpi->oxcf.worst_allowed_q; cpi->ni_tot_qi = 0; cpi->ni_frames = 0; cpi->total_byte_count = 0; cpi->drop_frame = 0; cpi->drop_count = 0; cpi->max_drop_count = 0; cpi->max_consec_dropped_frames = 4; cpi->rate_correction_factor = 1.0; cpi->key_frame_rate_correction_factor = 1.0; cpi->gf_rate_correction_factor = 1.0; cpi->est_max_qcorrection_factor = 1.0; cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max+1]; cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max+1]; cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mv_max+1]; cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mv_max+1]; cal_mvsadcosts(cpi->mb.mvsadcost); for (i = 0; i < KEY_FRAME_CONTEXT; i++) { cpi->prior_key_frame_size[i] = cpi->intra_frame_target; cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate; } cpi->check_freq[0] = 15; cpi->check_freq[1] = 15; #ifdef OUTPUT_YUV_SRC yuv_file = fopen("bd.yuv", "ab"); #endif #if 0 framepsnr = fopen("framepsnr.stt", "a"); kf_list = fopen("kf_list.stt", "w"); #endif cpi->output_pkt_list = oxcf->output_pkt_list; #if !(CONFIG_REALTIME_ONLY) if (cpi->pass == 1) { vp8_init_first_pass(cpi); } else if (cpi->pass == 2) { size_t packet_sz = vp8_firstpass_stats_sz(cpi->common.MBs); int packets = oxcf->two_pass_stats_in.sz / packet_sz; cpi->stats_in = oxcf->two_pass_stats_in.buf; cpi->stats_in_end = (void*)((char *)cpi->stats_in + (packets - 1) * packet_sz); vp8_init_second_pass(cpi); } #endif if (cpi->compressor_speed == 2) { cpi->cpu_freq = 0; //vp8_get_processor_freq(); cpi->avg_encode_time = 0; cpi->avg_pick_mode_time = 0; } vp8_set_speed_features(cpi); // Set starting values of RD threshold multipliers (128 = *1) for (i = 0; i < MAX_MODES; i++) { cpi->rd_thresh_mult[i] = 128; } #ifdef ENTROPY_STATS init_mv_ref_counts(); #endif vp8cx_create_encoder_threads(cpi); cpi->fn_ptr[BLOCK_16X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16); cpi->fn_ptr[BLOCK_16X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x16); cpi->fn_ptr[BLOCK_16X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x16); cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_h); cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_v); cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = VARIANCE_INVOKE(&cpi->rtcd.variance, halfpixvar16x16_hv); cpi->fn_ptr[BLOCK_16X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x3); cpi->fn_ptr[BLOCK_16X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x8); cpi->fn_ptr[BLOCK_16X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x16x4d); cpi->fn_ptr[BLOCK_16X8].sdf = 
VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8); cpi->fn_ptr[BLOCK_16X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var16x8); cpi->fn_ptr[BLOCK_16X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar16x8); cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL; cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL; cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL; cpi->fn_ptr[BLOCK_16X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x3); cpi->fn_ptr[BLOCK_16X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x8); cpi->fn_ptr[BLOCK_16X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad16x8x4d); cpi->fn_ptr[BLOCK_8X16].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16); cpi->fn_ptr[BLOCK_8X16].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x16); cpi->fn_ptr[BLOCK_8X16].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x16); cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL; cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL; cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL; cpi->fn_ptr[BLOCK_8X16].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x3); cpi->fn_ptr[BLOCK_8X16].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x8); cpi->fn_ptr[BLOCK_8X16].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x16x4d); cpi->fn_ptr[BLOCK_8X8].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8); cpi->fn_ptr[BLOCK_8X8].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var8x8); cpi->fn_ptr[BLOCK_8X8].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar8x8); cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL; cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL; cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL; cpi->fn_ptr[BLOCK_8X8].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x3); cpi->fn_ptr[BLOCK_8X8].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x8); cpi->fn_ptr[BLOCK_8X8].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad8x8x4d); cpi->fn_ptr[BLOCK_4X4].sdf = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4); cpi->fn_ptr[BLOCK_4X4].vf = VARIANCE_INVOKE(&cpi->rtcd.variance, var4x4); cpi->fn_ptr[BLOCK_4X4].svf = VARIANCE_INVOKE(&cpi->rtcd.variance, subpixvar4x4); cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL; cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL; cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL; cpi->fn_ptr[BLOCK_4X4].sdx3f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x3); cpi->fn_ptr[BLOCK_4X4].sdx8f = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x8); cpi->fn_ptr[BLOCK_4X4].sdx4df = VARIANCE_INVOKE(&cpi->rtcd.variance, sad4x4x4d); #if !(CONFIG_REALTIME_ONLY) cpi->full_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, full_search); #endif cpi->diamond_search_sad = SEARCH_INVOKE(&cpi->rtcd.search, diamond_search); cpi->ready_for_new_frame = 1; cpi->source_encode_index = 0; // make sure frame 1 is okay cpi->error_bins[0] = cpi->common.MBs; //vp8cx_init_quantizer() is first called here. Add check in vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only called later //when needed. This will avoid unnecessary calls of vp8cx_init_quantizer() for every frame. 
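    // The comment above suggests initialising the quantizer lazily. A minimal
    // sketch of such a guard, assuming a hypothetical 'quantizer_initialized'
    // flag (not a real VP8_COMP field):
#if 0
    static void vp8cx_frame_init_quantizer_sketch(VP8_COMP *cpi)
    {
        if (!cpi->quantizer_initialized)      /* hypothetical field */
        {
            vp8cx_init_quantizer(cpi);
            cpi->quantizer_initialized = 1;
        }
    }
#endif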
vp8cx_init_quantizer(cpi); { vp8_init_loop_filter(cm); cm->last_frame_type = KEY_FRAME; cm->last_filter_type = cm->filter_type; cm->last_sharpness_level = cm->sharpness_level; } cpi->common.error.setjmp = 0; return (VP8_PTR) cpi; } void vp8_remove_compressor(VP8_PTR *ptr) { VP8_COMP *cpi = (VP8_COMP *)(*ptr); if (!cpi) return; if (cpi && (cpi->common.current_video_frame > 0)) { #if !(CONFIG_REALTIME_ONLY) if (cpi->pass == 2) { vp8_end_second_pass(cpi); } #endif #ifdef ENTROPY_STATS print_context_counters(); print_tree_update_probs(); print_mode_context(); #endif #if CONFIG_PSNR if (cpi->pass != 1) { FILE *f = fopen("opsnr.stt", "a"); double time_encoded = (cpi->source_end_time_stamp - cpi->first_time_stamp_ever) / 10000000.000; double total_encode_time = (cpi->time_receive_data + cpi->time_compress_data) / 1000.000; double dr = (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded; if (cpi->b_calculate_psnr) { YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx]; double samples = 3.0 / 2 * cpi->count * lst_yv12->y_width * lst_yv12->y_height; double total_psnr = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error); double total_psnr2 = vp8_mse2psnr(samples, 255.0, cpi->total_sq_error2); double total_ssim = 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0); fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\tVPXSSIM\t Time(us)\n"); fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f %8.0f\n", dr, cpi->total / cpi->count, total_psnr, cpi->totalp / cpi->count, total_psnr2, total_ssim, total_encode_time); } if (cpi->b_calculate_ssimg) { fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t Time(us)\n"); fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr, cpi->total_ssimg_y / cpi->count, cpi->total_ssimg_u / cpi->count, cpi->total_ssimg_v / cpi->count, cpi->total_ssimg_all / cpi->count, total_encode_time); } fclose(f); #if 0 f = fopen("qskip.stt", "a"); fprintf(f, "minq:%d -maxq:%d skipture:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount); fclose(f); #endif } #endif #ifdef SPEEDSTATS if (cpi->compressor_speed == 2) { int i; FILE *f = fopen("cxspeed.stt", "a"); cnt_pm /= cpi->common.MBs; for (i = 0; i < 16; i++) fprintf(f, "%5d", frames_at_speed[i]); fprintf(f, "\n"); //fprintf(f, "%10d PM %10d %10d %10d EF %10d %10d %10d\n", cpi->Speed, cpi->avg_pick_mode_time, (tot_pm/cnt_pm), cnt_pm, cpi->avg_encode_time, 0, 0); fclose(f); } #endif #ifdef MODE_STATS { extern int count_mb_seg[4]; FILE *f = fopen("modes.stt", "a"); double dr = (double)cpi->oxcf.frame_rate * (double)bytes * (double)8 / (double)count / (double)1000 ; fprintf(f, "intra_mode in Intra Frames:\n"); fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]); fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]); fprintf(f, "B: "); { int i; for (i = 0; i < 10; i++) fprintf(f, "%8d, ", b_modes[i]); fprintf(f, "\n"); } fprintf(f, "Modes in Inter Frames:\n"); fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n", inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], inter_y_modes[3], inter_y_modes[4], inter_y_modes[5], inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], inter_y_modes[9]); fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]); fprintf(f, "B: "); { int i; for (i = 0; i < 15; i++) fprintf(f, "%8d, ", inter_b_modes[i]); fprintf(f, "\n"); } fprintf(f, "P:%8d, %8d, %8d, 
%8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]); fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]); fclose(f); } #endif #ifdef ENTROPY_STATS { int i, j, k; FILE *fmode = fopen("modecontext.c", "w"); fprintf(fmode, "\n#include \"entropymode.h\"\n\n"); fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts "); fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n"); for (i = 0; i < 10; i++) { fprintf(fmode, " { //Above Mode : %d\n", i); for (j = 0; j < 10; j++) { fprintf(fmode, " {"); for (k = 0; k < 10; k++) { if (!intra_mode_stats[i][j][k]) fprintf(fmode, " %5d, ", 1); else fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]); } fprintf(fmode, "}, // left_mode %d\n", j); } fprintf(fmode, " },\n"); } fprintf(fmode, "};\n"); fclose(fmode); } #endif #if defined(SECTIONBITS_OUTPUT) if (0) { int i; FILE *f = fopen("tokenbits.stt", "a"); for (i = 0; i < 28; i++) fprintf(f, "%8d", (int)(Sectionbits[i] / 256)); fprintf(f, "\n"); fclose(f); } #endif #if 0 { printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000); printf("\n_frames recive_data encod_mb_row compress_frame Total\n"); printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000); } #endif } vp8cx_remove_encoder_threads(cpi); vp8_dealloc_compressor_data(cpi); vpx_free(cpi->mb.ss); vpx_free(cpi->tok); vpx_free(cpi->rdtok); vpx_free(cpi->cyclic_refresh_map); vp8_remove_common(&cpi->common); vpx_free(cpi); *ptr = 0; #ifdef OUTPUT_YUV_SRC fclose(yuv_file); #endif #if 0 if (keyfile) fclose(keyfile); if (framepsnr) fclose(framepsnr); if (kf_list) fclose(kf_list); #endif } static uint64_t calc_plane_error(unsigned char *orig, int orig_stride, unsigned char *recon, int recon_stride, unsigned int cols, unsigned int rows, vp8_variance_rtcd_vtable_t *rtcd) { unsigned int row, col; uint64_t total_sse = 0; int diff; for (row = 0; row + 16 <= rows; row += 16) { for (col = 0; col + 16 <= cols; col += 16) { unsigned int sse; VARIANCE_INVOKE(rtcd, mse16x16)(orig + col, orig_stride, recon + col, recon_stride, &sse); total_sse += sse; } /* Handle odd-sized width */ if (col < cols) { unsigned int border_row, border_col; unsigned char *border_orig = orig; unsigned char *border_recon = recon; for (border_row = 0; border_row < 16; border_row++) { for (border_col = col; border_col < cols; border_col++) { diff = border_orig[border_col] - border_recon[border_col]; total_sse += diff * diff; } border_orig += orig_stride; border_recon += recon_stride; } } orig += orig_stride * 16; recon += recon_stride * 16; } /* Handle odd-sized height */ for (; row < rows; row++) { for (col = 0; col < cols; col++) { diff = orig[col] - recon[col]; total_sse += diff * diff; } orig += orig_stride; recon += recon_stride; } return total_sse; } static void generate_psnr_packet(VP8_COMP *cpi) { YV12_BUFFER_CONFIG *orig = cpi->Source; YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; struct vpx_codec_cx_pkt pkt; uint64_t sse; int i; unsigned int width = cpi->common.Width; unsigned int height = cpi->common.Height; pkt.kind = VPX_CODEC_PSNR_PKT; sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer, recon->y_stride, width, height, IF_RTCD(&cpi->rtcd.variance)); pkt.data.psnr.sse[0] = sse; pkt.data.psnr.sse[1] = sse; pkt.data.psnr.samples[0] = width * 
height; pkt.data.psnr.samples[1] = width * height; width = (width + 1) / 2; height = (height + 1) / 2; sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer, recon->uv_stride, width, height, IF_RTCD(&cpi->rtcd.variance)); pkt.data.psnr.sse[0] += sse; pkt.data.psnr.sse[2] = sse; pkt.data.psnr.samples[0] += width * height; pkt.data.psnr.samples[2] = width * height; sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer, recon->uv_stride, width, height, IF_RTCD(&cpi->rtcd.variance)); pkt.data.psnr.sse[0] += sse; pkt.data.psnr.sse[3] = sse; pkt.data.psnr.samples[0] += width * height; pkt.data.psnr.samples[3] = width * height; for (i = 0; i < 4; i++) pkt.data.psnr.psnr[i] = vp8_mse2psnr(pkt.data.psnr.samples[i], 255.0, pkt.data.psnr.sse[i]); vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt); } int vp8_use_as_reference(VP8_PTR ptr, int ref_frame_flags) { VP8_COMP *cpi = (VP8_COMP *)(ptr); if (ref_frame_flags > 7) return -1 ; cpi->ref_frame_flags = ref_frame_flags; return 0; } int vp8_update_reference(VP8_PTR ptr, int ref_frame_flags) { VP8_COMP *cpi = (VP8_COMP *)(ptr); if (ref_frame_flags > 7) return -1 ; cpi->common.refresh_golden_frame = 0; cpi->common.refresh_alt_ref_frame = 0; cpi->common.refresh_last_frame = 0; if (ref_frame_flags & VP8_LAST_FLAG) cpi->common.refresh_last_frame = 1; if (ref_frame_flags & VP8_GOLD_FLAG) cpi->common.refresh_golden_frame = 1; if (ref_frame_flags & VP8_ALT_FLAG) cpi->common.refresh_alt_ref_frame = 1; return 0; } int vp8_get_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) { VP8_COMP *cpi = (VP8_COMP *)(ptr); VP8_COMMON *cm = &cpi->common; int ref_fb_idx; if (ref_frame_flag == VP8_LAST_FLAG) ref_fb_idx = cm->lst_fb_idx; else if (ref_frame_flag == VP8_GOLD_FLAG) ref_fb_idx = cm->gld_fb_idx; else if (ref_frame_flag == VP8_ALT_FLAG) ref_fb_idx = cm->alt_fb_idx; else return -1; vp8_yv12_copy_frame_ptr(&cm->yv12_fb[ref_fb_idx], sd); return 0; } int vp8_set_reference(VP8_PTR ptr, VP8_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) { VP8_COMP *cpi = (VP8_COMP *)(ptr); VP8_COMMON *cm = &cpi->common; int ref_fb_idx; if (ref_frame_flag == VP8_LAST_FLAG) ref_fb_idx = cm->lst_fb_idx; else if (ref_frame_flag == VP8_GOLD_FLAG) ref_fb_idx = cm->gld_fb_idx; else if (ref_frame_flag == VP8_ALT_FLAG) ref_fb_idx = cm->alt_fb_idx; else return -1; vp8_yv12_copy_frame_ptr(sd, &cm->yv12_fb[ref_fb_idx]); return 0; } int vp8_update_entropy(VP8_PTR comp, int update) { VP8_COMP *cpi = (VP8_COMP *) comp; VP8_COMMON *cm = &cpi->common; cm->refresh_entropy_probs = update; return 0; } #if OUTPUT_YUV_SRC void vp8_write_yuv_frame(const char *name, YV12_BUFFER_CONFIG *s) { FILE *yuv_file = fopen(name, "ab"); unsigned char *src = s->y_buffer; int h = s->y_height; do { fwrite(src, s->y_width, 1, yuv_file); src += s->y_stride; } while (--h); src = s->u_buffer; h = s->uv_height; do { fwrite(src, s->uv_width, 1, yuv_file); src += s->uv_stride; } while (--h); src = s->v_buffer; h = s->uv_height; do { fwrite(src, s->uv_width, 1, yuv_file); src += s->uv_stride; } while (--h); fclose(yuv_file); } #endif static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) { VP8_COMMON *cm = &cpi->common; // are we resizing the image if (cm->horiz_scale != 0 || cm->vert_scale != 0) { #if CONFIG_SPATIAL_RESAMPLING int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); int tmp_height; if (cm->vert_scale == 3) tmp_height = 9; else tmp_height = 11; Scale2Ratio(cm->horiz_scale, &hr, 
&hs);
        Scale2Ratio(cm->vert_scale, &vr, &vs);
        vp8_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer,
                        tmp_height, hs, hr, vs, vr, 0);
        cpi->Source = &cpi->scaled_source;
#endif
    }
    // We may need to copy to a buffer so we can extend the image...
    else if (cm->Width != cm->yv12_fb[cm->lst_fb_idx].y_width ||
             cm->Height != cm->yv12_fb[cm->lst_fb_idx].y_height)
    {
        //vp8_yv12_copy_frame_ptr(sd, &cpi->scaled_source);
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
        if (cm->rtcd.flags & HAS_NEON)
#endif
        {
            vp8_yv12_copy_src_frame_func_neon(sd, &cpi->scaled_source);
        }
#if CONFIG_RUNTIME_CPU_DETECT
        else
#endif
#endif
#if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT
        {
            vp8_yv12_copy_frame_ptr(sd, &cpi->scaled_source);
        }
#endif
        cpi->Source = &cpi->scaled_source;
    }

    vp8_extend_to_multiple_of16(cpi->Source, cm->Width, cm->Height);
}

static void resize_key_frame(VP8_COMP *cpi)
{
#if CONFIG_SPATIAL_RESAMPLING
    VP8_COMMON *cm = &cpi->common;

    // Do we need to apply resampling for one pass CBR?
    // In one pass this is more limited than in two pass CBR.
    // The test and any change is only made once per key frame sequence.
    if (cpi->oxcf.allow_spatial_resampling &&
        (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER))
    {
        int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs);
        int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs);
        int new_width, new_height;

        // If we are below the resample DOWN watermark then scale down a notch.
        if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark *
            cpi->oxcf.optimal_buffer_level / 100))
        {
            cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO;
            cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO;
        }
        // Should we now start scaling back up?
        else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark *
                 cpi->oxcf.optimal_buffer_level / 100))
        {
            cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL;
            cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL;
        }

        // Get the new height and width.
        Scale2Ratio(cm->horiz_scale, &hr, &hs);
        Scale2Ratio(cm->vert_scale, &vr, &vs);
        new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs;
        new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs;

        // If the image size has changed we need to reallocate the buffers
        // and resample the source image.
        if ((cm->Width != new_width) || (cm->Height != new_height))
        {
            cm->Width = new_width;
            cm->Height = new_height;
            vp8_alloc_compressor_data(cpi);
            scale_and_extend_source(cpi->un_scaled_source, cpi);
        }
    }
#endif
}

// return of 0 means drop frame
static int pick_frame_size(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    // The first frame is a special case.
    if (cm->current_video_frame == 0)
    {
#if !(CONFIG_REALTIME_ONLY)
        if (cpi->pass == 2)
            vp8_calc_auto_iframe_target_size(cpi);
        // In one pass there is no information on which to base the size,
        // so use bandwidth per second * a fixed fraction.
        else
#endif
            cpi->this_frame_target = cpi->oxcf.target_bandwidth / 2;

        // In error resilient mode the first frame is bigger since it likely
        // contains all the static background.
        if (cpi->oxcf.error_resilient_mode == 1 || (cpi->compressor_speed == 2))
        {
            cpi->this_frame_target *= 3;      // 5;
        }

        // Key frame from VFW/auto-keyframe/first frame
        cm->frame_type = KEY_FRAME;
    }
    // Special case for forced key frames.
    // The frame sizing here is still far from ideal for 2 pass.
    else if (cm->frame_flags & FRAMEFLAGS_KEY)
    {
        cm->frame_type = KEY_FRAME;
        resize_key_frame(cpi);
        vp8_calc_iframe_target_size(cpi);
    }
    else if (cm->frame_type == KEY_FRAME)
    {
        vp8_calc_auto_iframe_target_size(cpi);
    }
    else
    {
        // INTER frame: compute target frame size
        cm->frame_type = INTER_FRAME;
        vp8_calc_pframe_target_size(cpi);

        // Check if we're dropping the frame:
        if (cpi->drop_frame)
        {
            cpi->drop_frame = FALSE;
            cpi->drop_count++;
            return 0;
        }
    }

    // Note target_size in bits * 256 per MB
    cpi->target_bits_per_mb = (cpi->this_frame_target * 256) / cpi->common.MBs;

    return 1;
}

static void set_quantizer(VP8_COMP *cpi, int Q)
{
    VP8_COMMON *cm = &cpi->common;
    MACROBLOCKD *mbd = &cpi->mb.e_mbd;

    cm->base_qindex = Q;
    cm->y1dc_delta_q = 0;
    cm->y2dc_delta_q = 0;
    cm->y2ac_delta_q = 0;
    cm->uvdc_delta_q = 0;
    cm->uvac_delta_q = 0;

    // Set segment specific quantizers.
    mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0];
    mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1];
    mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2];
    mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3];
}

static void update_alt_ref_frame_and_stats(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    // Update the alternate reference frame buffer.
    vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->alt_fb_idx]);

    // Select an interval before next GF or altref.
    if (!cpi->auto_gold)
        cpi->frames_till_gf_update_due = cpi->goldfreq;

    if ((cpi->pass != 2) && cpi->frames_till_gf_update_due)
    {
        cpi->current_gf_interval = cpi->frames_till_gf_update_due;

        // Set the bits per frame that we should try and recover in subsequent
        // inter frames to account for the extra GF spend... note that this does
        // not apply for GF updates that occur coincident with a key frame, as
        // the extra cost of key frames is dealt with elsewhere.
        cpi->gf_overspend_bits += cpi->projected_frame_size;
        cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
    }

    // Update the data structure that monitors the level of reference to the last GF.
    vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
    cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

    // This frame's refresh means subsequent frames don't refresh unless specified by the user.
    cpi->common.frames_since_golden = 0;

    // Clear the alternate reference update pending flag.
    cpi->source_alt_ref_pending = FALSE;

    // Set the alternate reference frame active flag.
    cpi->source_alt_ref_active = TRUE;
}

static void update_golden_frame_and_stats(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

    // Update the golden frame reconstruction buffer if signalled, and the GF usage counts.
    if (cm->refresh_golden_frame)
    {
        // Update the golden frame buffer.
        vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->gld_fb_idx]);

        // Select an interval before the next GF.
        if (!cpi->auto_gold)
            cpi->frames_till_gf_update_due = cpi->goldfreq;

        if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0))
        {
            cpi->current_gf_interval = cpi->frames_till_gf_update_due;

            // Set the bits per frame that we should try and recover in subsequent
            // inter frames to account for the extra GF spend... note that this does
            // not apply for GF updates that occur coincident with a key frame, as
            // the extra cost of key frames is dealt with elsewhere.
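            // Worked example of the recovery arithmetic that follows
            // (illustrative numbers only): the overspend is amortised across
            // the inter frames coded before the next GF update.
#if 0
            /* 24000 bits of accumulated GF/ARF overspend spread over the 12
               frames left until the next update docks each of those inter
               frames 2000 bits of its target. */
            int example_adjustment = 24000 / 12;
#endif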
            if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active)
            {
                // Calculate GF bits to be recovered:
                // projected size - av frame bits available for inter frames for the clip as a whole.
                cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target);
            }

            cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due;
        }

        // Update the data structure that monitors the level of reference to the last GF.
        vpx_memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols));
        cpi->gf_active_count = cm->mb_rows * cm->mb_cols;

        // This frame's refresh means subsequent frames don't refresh unless specified by the user.
        cm->refresh_golden_frame = 0;
        cpi->common.frames_since_golden = 0;

        //if ( cm->frame_type == KEY_FRAME )
        //{
        cpi->recent_ref_frame_usage[INTRA_FRAME] = 1;
        cpi->recent_ref_frame_usage[LAST_FRAME] = 1;
        cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1;
        cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1;
        //}
        //else
        //{
        //    // Carry a portion of the count over to the beginning of the next gf sequence.
        //    cpi->recent_ref_frame_usage[INTRA_FRAME] >>= 5;
        //    cpi->recent_ref_frame_usage[LAST_FRAME] >>= 5;
        //    cpi->recent_ref_frame_usage[GOLDEN_FRAME] >>= 5;
        //    cpi->recent_ref_frame_usage[ALTREF_FRAME] >>= 5;
        //}

        // ******** Fixed Q test code only ************
        // If we are going to use the ALT reference for the next group of frames, set a flag to say so.
        if (cpi->oxcf.fixed_q >= 0 &&
            cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame)
        {
            cpi->source_alt_ref_pending = TRUE;
            cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
        }

        if (!cpi->source_alt_ref_pending)
            cpi->source_alt_ref_active = FALSE;

        // Decrement the countdown till the next gf.
        if (cpi->frames_till_gf_update_due > 0)
            cpi->frames_till_gf_update_due--;
    }
    else if (!cpi->common.refresh_alt_ref_frame)
    {
        // Decrement the countdown till the next gf.
        if (cpi->frames_till_gf_update_due > 0)
            cpi->frames_till_gf_update_due--;

        if (cpi->common.frames_till_alt_ref_frame)
            cpi->common.frames_till_alt_ref_frame--;

        cpi->common.frames_since_golden++;

        if (cpi->common.frames_since_golden > 1)
        {
            cpi->recent_ref_frame_usage[INTRA_FRAME] += cpi->count_mb_ref_frame_usage[INTRA_FRAME];
            cpi->recent_ref_frame_usage[LAST_FRAME] += cpi->count_mb_ref_frame_usage[LAST_FRAME];
            cpi->recent_ref_frame_usage[GOLDEN_FRAME] += cpi->count_mb_ref_frame_usage[GOLDEN_FRAME];
            cpi->recent_ref_frame_usage[ALTREF_FRAME] += cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
        }
    }
}

// This function updates the reference frame probability estimates that
// will be used during mode selection.
static void update_rd_ref_frame_probs(VP8_COMP *cpi)
{
    VP8_COMMON *cm = &cpi->common;

#if 0
    const int *const rfct = cpi->recent_ref_frame_usage;
    const int rf_intra = rfct[INTRA_FRAME];
    const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

    if (cm->frame_type == KEY_FRAME)
    {
        cpi->prob_intra_coded = 255;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }
    else if (!(rf_intra + rf_inter))
    {
        // This is a trap in case this function is called with cpi->recent_ref_frame_usage[] blank.
        cpi->prob_intra_coded = 63;
        cpi->prob_last_coded = 128;
        cpi->prob_gf_coded = 128;
    }
    else
    {
        cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);

        if (cpi->prob_intra_coded < 1)
            cpi->prob_intra_coded = 1;

        if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
        {
            cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;

            if (cpi->prob_last_coded < 1)
                cpi->prob_last_coded = 1;

            cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) ?
(rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128; if (cpi->prob_gf_coded < 1) cpi->prob_gf_coded = 1; } } #else const int *const rfct = cpi->count_mb_ref_frame_usage; const int rf_intra = rfct[INTRA_FRAME]; const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]; if (cm->frame_type == KEY_FRAME) { cpi->prob_intra_coded = 255; cpi->prob_last_coded = 128; cpi->prob_gf_coded = 128; } else if (!(rf_intra + rf_inter)) { // This is a trap in case this function is called with cpi->recent_ref_frame_usage[] blank. cpi->prob_intra_coded = 63; cpi->prob_last_coded = 128; cpi->prob_gf_coded = 128; } else { cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter); if (cpi->prob_intra_coded < 1) cpi->prob_intra_coded = 1; cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128; if (cpi->prob_last_coded < 1) cpi->prob_last_coded = 1; cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) ? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128; if (cpi->prob_gf_coded < 1) cpi->prob_gf_coded = 1; } // update reference frame costs since we can do better than what we got last frame. if (cpi->common.refresh_alt_ref_frame) { cpi->prob_intra_coded += 40; cpi->prob_last_coded = 200; cpi->prob_gf_coded = 1; } else if (cpi->common.frames_since_golden == 0) { cpi->prob_last_coded = 214; cpi->prob_gf_coded = 1; } else if (cpi->common.frames_since_golden == 1) { cpi->prob_last_coded = 192; cpi->prob_gf_coded = 220; } else if (cpi->source_alt_ref_active) { //int dist = cpi->common.frames_till_alt_ref_frame + cpi->common.frames_since_golden; cpi->prob_gf_coded -= 20; if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10; } #endif } // 1 = key, 0 = inter static int decide_key_frame(VP8_COMP *cpi) { VP8_COMMON *cm = &cpi->common; int code_key_frame = FALSE; cpi->kf_boost = 0; if (cpi->Speed > 11) return FALSE; // Clear down mmx registers vp8_clear_system_state(); //__asm emms; if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) { double change = 1.0 * abs((int)(cpi->intra_error - cpi->last_intra_error)) / (1 + cpi->last_intra_error); double change2 = 1.0 * abs((int)(cpi->prediction_error - cpi->last_prediction_error)) / (1 + cpi->last_prediction_error); double minerror = cm->MBs * 256; #if 0 if (10 * cpi->intra_error / (1 + cpi->prediction_error) < 15 && cpi->prediction_error > minerror && (change > .25 || change2 > .25)) { FILE *f = fopen("intra_inter.stt", "a"); if (cpi->prediction_error <= 0) cpi->prediction_error = 1; fprintf(f, "%d %d %d %d %14.4f\n", cm->current_video_frame, (int) cpi->prediction_error, (int) cpi->intra_error, (int)((10 * cpi->intra_error) / cpi->prediction_error), change); fclose(f); } #endif cpi->last_intra_error = cpi->intra_error; cpi->last_prediction_error = cpi->prediction_error; if (10 * cpi->intra_error / (1 + cpi->prediction_error) < 15 && cpi->prediction_error > minerror && (change > .25 || change2 > .25)) { /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > cpi->last_frame_percent_intra + 3*/ return TRUE; } return FALSE; } // If the following are true we might as well code a key frame if (((cpi->this_frame_percent_intra == 100) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) || ((cpi->this_frame_percent_intra > 95) && (cpi->this_frame_percent_intra >= (cpi->last_frame_percent_intra + 5)))) { code_key_frame = TRUE; } // in addition if the following are true and this is not a golden frame then code a key frame // 
Note that on golden frames there often seems to be a pop in intra useage anyway hence this // restriction is designed to prevent spurious key frames. The Intra pop needs to be investigated. else if (((cpi->this_frame_percent_intra > 60) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) || ((cpi->this_frame_percent_intra > 75) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 3 / 2))) || ((cpi->this_frame_percent_intra > 90) && (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 10)))) { if (!cm->refresh_golden_frame) code_key_frame = TRUE; } return code_key_frame; } #if !(CONFIG_REALTIME_ONLY) static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags) { (void) size; (void) dest; (void) frame_flags; set_quantizer(cpi, 26); scale_and_extend_source(cpi->un_scaled_source, cpi); vp8_first_pass(cpi); } #endif #if 0 void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) { // write the frame FILE *yframe; int i; char filename[255]; sprintf(filename, "cx\\y%04d.raw", this_frame); yframe = fopen(filename, "wb"); for (i = 0; i < frame->y_height; i++) fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe); fclose(yframe); sprintf(filename, "cx\\u%04d.raw", this_frame); yframe = fopen(filename, "wb"); for (i = 0; i < frame->uv_height; i++) fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe); fclose(yframe); sprintf(filename, "cx\\v%04d.raw", this_frame); yframe = fopen(filename, "wb"); for (i = 0; i < frame->uv_height; i++) fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe); fclose(yframe); } #endif // return of 0 means drop frame static void encode_frame_to_data_rate ( VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags ) { int Q; int frame_over_shoot_limit; int frame_under_shoot_limit; int Loop = FALSE; int loop_count; int this_q; int last_zbin_oq; int q_low; int q_high; int zbin_oq_high; int zbin_oq_low = 0; int top_index; int bottom_index; VP8_COMMON *cm = &cpi->common; int active_worst_qchanged = FALSE; int overshoot_seen = FALSE; int undershoot_seen = FALSE; int drop_mark = cpi->oxcf.drop_frames_water_mark * cpi->oxcf.optimal_buffer_level / 100; int drop_mark75 = drop_mark * 2 / 3; int drop_mark50 = drop_mark / 4; int drop_mark25 = drop_mark / 8; // Clear down mmx registers to allow floating point in what follows vp8_clear_system_state(); // Test code for segmentation of gf/arf (0,0) //segmentation_test_function((VP8_PTR) cpi); // For an alt ref frame in 2 pass we skip the call to the second pass function that sets the target bandwidth #if !(CONFIG_REALTIME_ONLY) if (cpi->pass == 2) { if (cpi->common.refresh_alt_ref_frame) { cpi->per_frame_bandwidth = cpi->gf_bits; // Per frame bit target for the alt ref frame cpi->target_bandwidth = cpi->gf_bits * cpi->output_frame_rate; // per second target bitrate } } else #endif cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_frame_rate); // Default turn off buffer to buffer copying cm->copy_buffer_to_gf = 0; cm->copy_buffer_to_arf = 0; // Clear zbin over-quant value and mode boost values. 
cpi->zbin_over_quant = 0; cpi->zbin_mode_boost = 0; // Enable mode based tweaking of the zbin cpi->zbin_mode_boost_enabled = TRUE; // Current default encoder behaviour for the altref sign bias if (cpi->source_alt_ref_active) cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1; else cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0; // Check to see if a key frame is signalled // For two pass with auto key frame enabled cm->frame_type may already be set, but not for one pass. if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) || (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0))) { // Key frame from VFW/auto-keyframe/first frame cm->frame_type = KEY_FRAME; } // Set default state for segment and mode based loop filter update flags cpi->mb.e_mbd.update_mb_segmentation_map = 0; cpi->mb.e_mbd.update_mb_segmentation_data = 0; cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; // Set various flags etc to special state if it is a key frame if (cm->frame_type == KEY_FRAME) { int i; // Reset the loop filter deltas and segmentation map setup_features(cpi); // If segmentation is enabled force a map update for key frames if (cpi->mb.e_mbd.segmentation_enabled) { cpi->mb.e_mbd.update_mb_segmentation_map = 1; cpi->mb.e_mbd.update_mb_segmentation_data = 1; } // The alternate reference frame cannot be active for a key frame cpi->source_alt_ref_active = FALSE; // Reset the RD threshold multipliers to default of * 1 (128) for (i = 0; i < MAX_MODES; i++) { cpi->rd_thresh_mult[i] = 128; } } // Test code for segmentation //if ( (cm->frame_type == KEY_FRAME) || ((cm->current_video_frame % 2) == 0)) //if ( (cm->current_video_frame % 2) == 0 ) // enable_segmentation((VP8_PTR)cpi); //else // disable_segmentation((VP8_PTR)cpi); #if 0 // Experimental code for lagged compress and one pass // Initialise one_pass GF frames stats // Update stats used for GF selection //if ( cpi->pass == 0 ) { cpi->one_pass_frame_index = cm->current_video_frame % MAX_LAG_BUFFERS; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frames_so_far = 0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_intra_error = 0.0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_coded_error = 0.0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_inter = 0.0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_pcnt_motion = 0.0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr = 0.0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvr_abs = 0.0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc = 0.0; cpi->one_pass_frame_stats[cpi->one_pass_frame_index ].frame_mvc_abs = 0.0; } #endif update_rd_ref_frame_probs(cpi); if (cpi->drop_frames_allowed) { // The reset to decimation 0 is only done here for one pass. // Once it is set two pass leaves decimation on till the next kf. 
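        // The watermark tests below move the decimation factor one notch at a
        // time and also condition on the current factor. A stateless sketch of
        // the steady-state mapping they converge to (illustrative only):
#if 0
        static int steady_state_decimation(int buffer_level, int drop_mark25,
                                           int drop_mark50, int drop_mark75)
        {
            if (buffer_level < drop_mark25)
                return 3;   /* deepest underrun: decimate hardest */
            else if (buffer_level < drop_mark50)
                return 2;
            else if (buffer_level < drop_mark75)
                return 1;
            else
                return 0;   /* buffer healthy: no decimation */
        }
#endif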
if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) cpi->decimation_factor --; if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) cpi->decimation_factor = 1; else if (cpi->buffer_level < drop_mark25 && (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) { cpi->decimation_factor = 3; } else if (cpi->buffer_level < drop_mark50 && (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) { cpi->decimation_factor = 2; } else if (cpi->buffer_level < drop_mark75 && (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) { cpi->decimation_factor = 1; } //vpx_log("Encoder: Decimation Factor: %d \n",cpi->decimation_factor); } // The following decimates the frame rate according to a regular pattern (i.e. to 1/2 or 2/3 frame rate) // This can be used to help prevent buffer under-run in CBR mode. Alternatively it might be desirable in // some situations to drop frame rate but throw more bits at each frame. // // Note that dropping a key frame can be problematic if spatial resampling is also active if (cpi->decimation_factor > 0) { switch (cpi->decimation_factor) { case 1: cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2; break; case 2: cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4; break; case 3: cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4; break; } // Note that we should not throw out a key frame (especially when spatial resampling is enabled). if ((cm->frame_type == KEY_FRAME)) // && cpi->oxcf.allow_spatial_resampling ) { cpi->decimation_count = cpi->decimation_factor; } else if (cpi->decimation_count > 0) { cpi->decimation_count --; cpi->bits_off_target += cpi->av_per_frame_bandwidth; cm->current_video_frame++; cpi->frames_since_key++; #if CONFIG_PSNR cpi->count ++; #endif cpi->buffer_level = cpi->bits_off_target; return; } else cpi->decimation_count = cpi->decimation_factor; } // Decide how big to make the frame if (!pick_frame_size(cpi)) { cm->current_video_frame++; cpi->frames_since_key++; return; } // Reduce active_worst_allowed_q for CBR if our buffer is getting too full. // This has a knock on effect on active best quality as well. // For CBR if the buffer reaches its maximum level then we can no longer // save up bits for later frames so we might as well use them up // on the current frame. 
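    // The adjustment below relaxes active_worst_quality by up to a quarter of
    // its value as the buffer fills from the optimal level towards the
    // maximum. Worked example (illustrative numbers only):
#if 0
    {
        int active_worst_quality = 40;            /* current ceiling */
        int buffer_level = 90000;                 /* between optimal and max */
        int optimal = 60000, maximum = 120000;

        int max_adjustment = active_worst_quality / 4;             /* 10 */
        int buff_lvl_step = (maximum - optimal) / max_adjustment;  /* 6000 */
        int adjustment = (buffer_level - optimal) / buff_lvl_step; /* 5 */

        active_worst_quality -= adjustment;                        /* 35 */
    }
#endif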
if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode) { int Adjustment = cpi->active_worst_quality / 4; // Max adjustment is 1/4 if (Adjustment) { int buff_lvl_step; int tmp_lvl = cpi->buffer_level; if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) { buff_lvl_step = (cpi->oxcf.maximum_buffer_size - cpi->oxcf.optimal_buffer_level) / Adjustment; if (buff_lvl_step) { Adjustment = (cpi->buffer_level - cpi->oxcf.optimal_buffer_level) / buff_lvl_step; cpi->active_worst_quality -= Adjustment; } } else { cpi->active_worst_quality -= Adjustment; } } } // Set an active best quality and if necessary active worst quality if (cpi->pass == 2 || (cm->current_video_frame > 150)) { int Q; int i; int bpm_target; //int tmp; vp8_clear_system_state(); Q = cpi->active_worst_quality; if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame) { if (cm->frame_type != KEY_FRAME) { if (cpi->avg_frame_qindex < cpi->active_worst_quality) Q = cpi->avg_frame_qindex; if ( cpi->gfu_boost > 1000 ) cpi->active_best_quality = gf_low_motion_minq[Q]; else if ( cpi->gfu_boost < 400 ) cpi->active_best_quality = gf_high_motion_minq[Q]; else cpi->active_best_quality = gf_mid_motion_minq[Q]; /*cpi->active_best_quality = gf_arf_minq[Q]; tmp = (cpi->gfu_boost > 1000) ? 600 : cpi->gfu_boost - 400; //tmp = (cpi->gfu_boost > 1000) ? 600 : //(cpi->gfu_boost < 400) ? 0 : cpi->gfu_boost - 400; tmp = 128 - (tmp >> 4); cpi->active_best_quality = (cpi->active_best_quality * tmp)>>7;*/ } // KEY FRAMES else { if (cpi->gfu_boost > 600) cpi->active_best_quality = kf_low_motion_minq[Q]; else cpi->active_best_quality = kf_high_motion_minq[Q]; } } else { cpi->active_best_quality = inter_minq[Q]; } // If CBR and the buffer is as full then it is reasonable to allow higher quality on the frames // to prevent bits just going to waste. 
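    // When the buffer sits between the optimal and maximum levels, the CBR
    // branch below interpolates active_best_quality towards best_quality in
    // steps of 1/128. Worked example (illustrative numbers only):
#if 0
    {
        int buffer_level = 90000, optimal = 60000, maximum = 120000;
        int active_best_quality = 36, best_quality = 4;

        /* Halfway between optimal and maximum -> fraction = 64 of 128. */
        int fraction = ((buffer_level - optimal) * 128) / (maximum - optimal);

        /* Give back half of the 32-step gap: 36 - (32 * 64) / 128 = 20. */
        active_best_quality -= ((active_best_quality - best_quality) * fraction) / 128;
    }
#endif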
        if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)
        {
            // Note that the use of >= here eliminates the risk of a divide by 0 error in the else if clause.
            if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size)
                cpi->active_best_quality = cpi->best_quality;
            else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)
            {
                int Fraction = ((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) /
                               (cpi->oxcf.maximum_buffer_size - cpi->oxcf.optimal_buffer_level);
                int min_qadjustment = ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128;

                cpi->active_best_quality -= min_qadjustment;
            }
        }
    }

    // Clip the active best and worst quality values to limits.
    if (cpi->active_worst_quality > cpi->worst_quality)
        cpi->active_worst_quality = cpi->worst_quality;

    if (cpi->active_best_quality < cpi->best_quality)
        cpi->active_best_quality = cpi->best_quality;
    else if (cpi->active_best_quality > cpi->active_worst_quality)
        cpi->active_best_quality = cpi->active_worst_quality;

    // Determine the initial Q to try.
    Q = vp8_regulate_q(cpi, cpi->this_frame_target);
    last_zbin_oq = cpi->zbin_over_quant;

    // Set the highest allowed value for Zbin over quant.
    if (cm->frame_type == KEY_FRAME)
        zbin_oq_high = 0; //ZBIN_OQ_MAX/16
    else if (cm->refresh_alt_ref_frame || (cm->refresh_golden_frame && !cpi->source_alt_ref_active))
        zbin_oq_high = 16;
    else
        zbin_oq_high = ZBIN_OQ_MAX;

    // Set up background Q adjustment for error resilient mode.
    if (cpi->cyclic_refresh_mode_enabled)
        cyclic_background_refresh(cpi, Q, 0);

    vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);

    // Limit Q range for the adaptive loop (values not clipped to range 20-60 as in VP8).
    bottom_index = cpi->active_best_quality;
    top_index = cpi->active_worst_quality;

    vp8_save_coding_context(cpi);

    loop_count = 0;

    q_low = cpi->best_quality;
    q_high = cpi->worst_quality;

    scale_and_extend_source(cpi->un_scaled_source, cpi);

#if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC
    if (cpi->oxcf.noise_sensitivity > 0)
    {
        unsigned char *src;
        int l = 0;

        switch (cpi->oxcf.noise_sensitivity)
        {
        case 1: l = 20; break;
        case 2: l = 40; break;
        case 3: l = 60; break;
        case 4: l = 80; break;
        case 5: l = 100; break;
        case 6: l = 150; break;
        }

        if (cm->frame_type == KEY_FRAME)
        {
            vp8_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));
            cpi->ppi.frame = 0;
        }
        else
        {
            vp8_de_noise(cpi->Source, cpi->Source, l, 1, 0, RTCD(postproc));

            src = cpi->Source->y_buffer;

            if (cpi->Source->y_stride < 0)
            {
                src += cpi->Source->y_stride * (cpi->Source->y_height - 1);
            }

            //temp_filter(&cpi->ppi,src,src,
            //            cm->last_frame.y_width * cm->last_frame.y_height,
            //            cpi->oxcf.noise_sensitivity);
        }
    }
#endif

#ifdef OUTPUT_YUV_SRC
    vp8_write_yuv_frame("bd.yuv", cpi->Source);
#endif

    do
    {
        vp8_clear_system_state();  //__asm emms;

        /*
        if(cpi->is_src_frame_alt_ref)
            Q = 127;
        */

        set_quantizer(cpi, Q);
        this_q = Q;

        // Set up the skip prob for costing in the mode/mv decision.
        if (cpi->common.mb_no_coeff_skip)
        {
            cpi->prob_skip_false = cpi->base_skip_false_prob[Q];

            if (cm->frame_type != KEY_FRAME)
            {
                if (cpi->common.refresh_alt_ref_frame)
                {
                    if (cpi->last_skip_false_probs[2] != 0)
                        cpi->prob_skip_false = cpi->last_skip_false_probs[2];

                    /*
                    if(cpi->last_skip_false_probs[2]!=0 && abs(Q- cpi->last_skip_probs_q[2])<=16 )
                        cpi->prob_skip_false = cpi->last_skip_false_probs[2];
                    else if (cpi->last_skip_false_probs[2]!=0)
                        cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + cpi->prob_skip_false ) / 2;
                    */
                }
                else if (cpi->common.refresh_golden_frame)
                {
                    if (cpi->last_skip_false_probs[1] != 0)
                        cpi->prob_skip_false = cpi->last_skip_false_probs[1];

                    /*
                    if(cpi->last_skip_false_probs[1]!=0 && abs(Q- cpi->last_skip_probs_q[1])<=16 )
                        cpi->prob_skip_false = cpi->last_skip_false_probs[1];
                    else if (cpi->last_skip_false_probs[1]!=0)
                        cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + cpi->prob_skip_false ) / 2;
                    */
                }
                else
                {
                    if (cpi->last_skip_false_probs[0] != 0)
                        cpi->prob_skip_false = cpi->last_skip_false_probs[0];

                    /*
                    if(cpi->last_skip_false_probs[0]!=0 && abs(Q- cpi->last_skip_probs_q[0])<=16 )
                        cpi->prob_skip_false = cpi->last_skip_false_probs[0];
                    else if(cpi->last_skip_false_probs[0]!=0)
                        cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + cpi->prob_skip_false ) / 2;
                    */
                }

                // As this is for a cost estimate, make sure it does not go to an extreme either way.
                if (cpi->prob_skip_false < 5)
                    cpi->prob_skip_false = 5;

                if (cpi->prob_skip_false > 250)
                    cpi->prob_skip_false = 250;

                if (cpi->is_src_frame_alt_ref)
                    cpi->prob_skip_false = 1;
            }

#if 0
            if (cpi->pass != 1)
            {
                FILE *f = fopen("skip.stt", "a");
                fprintf(f, "%d, %d, %4d ", cpi->common.refresh_golden_frame, cpi->common.refresh_alt_ref_frame, cpi->prob_skip_false);
                fclose(f);
            }
#endif
        }

        if (cm->frame_type == KEY_FRAME)
            vp8_setup_key_frame(cpi);

        // Transform / motion compensation: build the reconstruction frame.
        vp8_encode_frame(cpi);

        cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi);
        cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0;

        vp8_clear_system_state();  //__asm emms;

        // Test to see if the stats generated for this frame indicate that we
        // should have coded a key frame (assuming that we didn't)!
        if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME)
        {
            if (decide_key_frame(cpi))
            {
                vp8_calc_auto_iframe_target_size(cpi);

                // Reset all our sizing numbers and recode.
                cm->frame_type = KEY_FRAME;

                // Clear the Alt reference frame active flag when we have a key frame.
                cpi->source_alt_ref_active = FALSE;

                // Reset the loop filter deltas and segmentation map.
                setup_features(cpi);

                // If segmentation is enabled, force a map update for key frames.
                if (cpi->mb.e_mbd.segmentation_enabled)
                {
                    cpi->mb.e_mbd.update_mb_segmentation_map = 1;
                    cpi->mb.e_mbd.update_mb_segmentation_data = 1;
                }

                vp8_restore_coding_context(cpi);

                Q = vp8_regulate_q(cpi, cpi->this_frame_target);

                q_low = cpi->best_quality;
                q_high = cpi->worst_quality;

                vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit);

                // Limit Q range for the adaptive loop (values not clipped to range 20-60 as in VP8).
                bottom_index = cpi->active_best_quality;
                top_index = cpi->active_worst_quality;

                loop_count++;
                Loop = TRUE;

                resize_key_frame(cpi);
                continue;
            }
        }

        vp8_clear_system_state();

        if (frame_over_shoot_limit == 0)
            frame_over_shoot_limit = 1;

        // Are we overshooting while up against the limit of active max Q?
        if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) &&
            (Q == cpi->active_worst_quality) &&
            (cpi->active_worst_quality < cpi->worst_quality) &&
            (cpi->projected_frame_size > frame_over_shoot_limit))
        {
            int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit;

            // If so, is there any scope for relaxing it?
            while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0))
            {
                cpi->active_worst_quality++;
                top_index = cpi->active_worst_quality;
                over_size_percent = (int)(over_size_percent * 0.96);  // Assume 1 qstep is about 4% on frame size.
            }

            // If we have updated the active max Q do not call vp8_update_rate_correction_factors() this loop.
active_worst_qchanged = TRUE; } else active_worst_qchanged = FALSE; #if !(CONFIG_REALTIME_ONLY) // Is the projected frame size out of range and are we allowed to attempt to recode. if (((cpi->sf.recode_loop == 1) || ((cpi->sf.recode_loop == 2) && (cm->refresh_golden_frame || (cm->frame_type == KEY_FRAME)))) && (((cpi->projected_frame_size > frame_over_shoot_limit) && (Q < top_index)) || //((cpi->projected_frame_size > frame_over_shoot_limit ) && (Q == top_index) && (cpi->zbin_over_quant < ZBIN_OQ_MAX)) || ((cpi->projected_frame_size < frame_under_shoot_limit) && (Q > bottom_index))) ) { int last_q = Q; int Retries = 0; // Frame size out of permitted range: // Update correction factor & compute new Q to try... if (cpi->projected_frame_size > frame_over_shoot_limit) { //if ( cpi->zbin_over_quant == 0 ) q_low = (Q < q_high) ? (Q + 1) : q_high; // Raise Qlow as to at least the current value if (cpi->zbin_over_quant > 0) // If we are using over quant do the same for zbin_oq_low zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high; //if ( undershoot_seen || (Q == MAXQ) ) if (undershoot_seen) { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 1); Q = (q_high + q_low + 1) / 2; // Adjust cpi->zbin_over_quant (only allowed when Q is max) if (Q < MAXQ) cpi->zbin_over_quant = 0; else { zbin_oq_low = (cpi->zbin_over_quant < zbin_oq_high) ? (cpi->zbin_over_quant + 1) : zbin_oq_high; cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; } } else { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); while (((Q < q_low) || (cpi->zbin_over_quant < zbin_oq_low)) && (Retries < 10)) { vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); Retries ++; } } overshoot_seen = TRUE; } else { if (cpi->zbin_over_quant == 0) q_high = (Q > q_low) ? (Q - 1) : q_low; // Lower q_high if not using over quant else // else lower zbin_oq_high zbin_oq_high = (cpi->zbin_over_quant > zbin_oq_low) ? (cpi->zbin_over_quant - 1) : zbin_oq_low; if (overshoot_seen) { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 1); Q = (q_high + q_low) / 2; // Adjust cpi->zbin_over_quant (only allowed when Q is max) if (Q < MAXQ) cpi->zbin_over_quant = 0; else cpi->zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; } else { // Update rate_correction_factor unless cpi->active_worst_quality has changed. if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); while (((Q > q_high) || (cpi->zbin_over_quant > zbin_oq_high)) && (Retries < 10)) { vp8_update_rate_correction_factors(cpi, 0); Q = vp8_regulate_q(cpi, cpi->this_frame_target); Retries ++; } } undershoot_seen = TRUE; } // Clamp Q to upper and lower limits: if (Q > q_high) Q = q_high; else if (Q < q_low) Q = q_low; // Clamp cpi->zbin_over_quant cpi->zbin_over_quant = (cpi->zbin_over_quant < zbin_oq_low) ? zbin_oq_low : (cpi->zbin_over_quant > zbin_oq_high) ? zbin_oq_high : cpi->zbin_over_quant; //Loop = ((Q != last_q) || (last_zbin_oq != cpi->zbin_over_quant)) ? TRUE : FALSE; Loop = ((Q != last_q)) ? 
TRUE : FALSE;
            last_zbin_oq = cpi->zbin_over_quant;
        }
        else
#endif
            Loop = FALSE;

        if (cpi->is_src_frame_alt_ref)
            Loop = FALSE;

        if (Loop == TRUE)
        {
            vp8_restore_coding_context(cpi);
            loop_count++;
#if CONFIG_PSNR
            cpi->tot_recode_hits++;
#endif
        }
    }
    while (Loop == TRUE);

#if 0
    // Experimental code for lagged and one pass
    // Update stats used for one pass GF selection
    {
        /*
            int frames_so_far;
            double frame_intra_error;
            double frame_coded_error;
            double frame_pcnt_inter;
            double frame_pcnt_motion;
            double frame_mvr;
            double frame_mvr_abs;
            double frame_mvc;
            double frame_mvc_abs;
        */
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_coded_error = (double)cpi->prediction_error;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_intra_error = (double)cpi->intra_error;
        cpi->one_pass_frame_stats[cpi->one_pass_frame_index].frame_pcnt_inter = (double)(100 - cpi->this_frame_percent_intra) / 100.0;
    }
#endif

    // Update the GF usage maps.
    // This is done after completing the compression of a frame, when all modes
    // etc. are finalized but before the loop filter.
    vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);

    if (cm->frame_type == KEY_FRAME)
        cm->refresh_last_frame = 1;

#if 0
    {
        FILE *f = fopen("gfactive.stt", "a");
        fprintf(f, "%8d %8d %8d %8d %8d\n", cm->current_video_frame, (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols), cpi->this_iiratio, cpi->next_iiratio, cm->refresh_golden_frame);
        fclose(f);
    }
#endif

    // For inter frames the current default behaviour is that when cm->refresh_golden_frame
    // is set we copy the old GF over to the ARF buffer.
    // This is purely an encoder decision at present.
    if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame)
        cm->copy_buffer_to_arf = 2;
    else
        cm->copy_buffer_to_arf = 0;

    if (cm->refresh_last_frame)
    {
        vp8_swap_yv12_buffer(&cm->yv12_fb[cm->lst_fb_idx], &cm->yv12_fb[cm->new_fb_idx]);
        cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx];
    }
    else
        cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];

    //#pragma omp parallel sections
    {

        //#pragma omp section
        {
            struct vpx_usec_timer timer;

            vpx_usec_timer_start(&timer);

            if (cpi->sf.auto_filter == 0)
                vp8cx_pick_filter_level_fast(cpi->Source, cpi);
            else
                vp8cx_pick_filter_level(cpi->Source, cpi);

            vpx_usec_timer_mark(&timer);

            cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer);

            if (cm->no_lpf)
                cm->filter_level = 0;

            if (cm->filter_level > 0)
            {
                vp8cx_set_alt_lf_level(cpi, cm->filter_level);
                vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, cm->filter_level);
                cm->last_frame_type = cm->frame_type;
                cm->last_filter_type = cm->filter_type;
                cm->last_sharpness_level = cm->sharpness_level;
            }

            vp8_yv12_extend_frame_borders_ptr(cm->frame_to_show);

            if (cpi->oxcf.error_resilient_mode == 1)
            {
                cm->refresh_entropy_probs = 0;
            }
        }

        //#pragma omp section
        {
            // Build the bitstream.
            vp8_pack_bitstream(cpi, dest, size);
        }
    }

    {
        YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx];
        YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
        YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx];
        YV12_BUFFER_CONFIG *alt_yv12 = &cm->yv12_fb[cm->alt_fb_idx];

        // At this point the new frame has been encoded.
        // If any buffer copy / swapping is signalled it should be done here.
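        // The copy flags used below form a small protocol: for the ARF buffer
        // a value of 1 means "copy from the last frame" (new_yv12 when last
        // was just refreshed, because the buffers have been swapped) and 2
        // means "copy from the golden frame". Condensed sketch of the ARF
        // dispatch (illustrative only):
#if 0
        static YV12_BUFFER_CONFIG *arf_copy_source(VP8_COMMON *cm,
                                                   YV12_BUFFER_CONFIG *lst_yv12,
                                                   YV12_BUFFER_CONFIG *new_yv12,
                                                   YV12_BUFFER_CONFIG *gld_yv12)
        {
            if (cm->copy_buffer_to_arf == 1)
                return cm->refresh_last_frame ? new_yv12 : lst_yv12;
            else if (cm->copy_buffer_to_arf == 2)
                return gld_yv12;

            return 0;   /* no copy signalled */
        }
#endif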
        if (cm->frame_type == KEY_FRAME)
        {
            vp8_yv12_copy_frame_ptr(cm->frame_to_show, gld_yv12);
            vp8_yv12_copy_frame_ptr(cm->frame_to_show, alt_yv12);
        }
        else    // For non key frames
        {
            // Code to copy between reference buffers.
            if (cm->copy_buffer_to_arf)
            {
                if (cm->copy_buffer_to_arf == 1)
                {
                    if (cm->refresh_last_frame)
                        // We copy new_frame here because the last and new buffers will
                        // already have been swapped if cm->refresh_last_frame is set.
                        vp8_yv12_copy_frame_ptr(new_yv12, alt_yv12);
                    else
                        vp8_yv12_copy_frame_ptr(lst_yv12, alt_yv12);
                }
                else if (cm->copy_buffer_to_arf == 2)
                    vp8_yv12_copy_frame_ptr(gld_yv12, alt_yv12);
            }

            if (cm->copy_buffer_to_gf)
            {
                if (cm->copy_buffer_to_gf == 1)
                {
                    if (cm->refresh_last_frame)
                        // We copy new_frame here because the last and new buffers will
                        // already have been swapped if cm->refresh_last_frame is set.
                        vp8_yv12_copy_frame_ptr(new_yv12, gld_yv12);
                    else
                        vp8_yv12_copy_frame_ptr(lst_yv12, gld_yv12);
                }
                else if (cm->copy_buffer_to_gf == 2)
                    vp8_yv12_copy_frame_ptr(alt_yv12, gld_yv12);
            }
        }
    }

    // Update rate control heuristics.
    cpi->total_byte_count += (*size);
    cpi->projected_frame_size = (*size) << 3;

    if (!active_worst_qchanged)
        vp8_update_rate_correction_factors(cpi, 2);

    cpi->last_q[cm->frame_type] = cm->base_qindex;

    if (cm->frame_type == KEY_FRAME)
    {
        vp8_adjust_key_frame_context(cpi);
    }

    // Keep a record of ambient average Q.
    if (cm->frame_type == KEY_FRAME)
        cpi->avg_frame_qindex = cm->base_qindex;
    else
        cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2;

    // Keep a record from which we can calculate the average Q excluding GF updates and key frames.
    if ((cm->frame_type != KEY_FRAME) && !cm->refresh_golden_frame && !cm->refresh_alt_ref_frame)
    {
        cpi->ni_frames++;

        // Calculate the average Q for normal inter frames (not key or GFU frames).
        // This is used as a basis for setting active worst quality.
        if (cpi->ni_frames > 150)
        {
            cpi->ni_tot_qi += Q;
            cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames);
        }
        // Early in the clip, average the current frame Q value with the default
        // entered by the user as a dampening measure.
        else
        {
            cpi->ni_tot_qi += Q;
            cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2;
        }

        // If the average Q is higher than what was used in the last frame
        // (after going through the recode loop to keep the frame size within range)
        // then use the last frame value - 1.
        // The -1 is designed to stop Q, and hence the data rate, from progressively
        // falling away during difficult sections, but at the same time reduce the
        // number of iterations around the recode loop.
        if (Q > cpi->ni_av_qi)
            cpi->ni_av_qi = Q - 1;
    }

#if 0
    // If the frame was massively oversize and we are below the optimal buffer level, drop the next frame.
    if ((cpi->drop_frames_allowed) &&
        (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) &&
        (cpi->buffer_level < cpi->oxcf.drop_frames_water_mark * cpi->oxcf.optimal_buffer_level / 100) &&
        (cpi->projected_frame_size > (4 * cpi->this_frame_target)))
    {
        cpi->drop_frame = TRUE;
    }
#endif

    // Set the count for maximum consecutive dropped frames based upon the ratio of
    // this frame size to the target average per frame bandwidth.
    // (cpi->av_per_frame_bandwidth > 0) is just a sanity check to prevent / 0.
    if (cpi->drop_frames_allowed && (cpi->av_per_frame_bandwidth > 0))
    {
        cpi->max_drop_count = cpi->projected_frame_size / cpi->av_per_frame_bandwidth;

        if (cpi->max_drop_count > cpi->max_consec_dropped_frames)
            cpi->max_drop_count = cpi->max_consec_dropped_frames;
    }

    // Update the buffer level variable.
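    // bits_off_target below behaves like a leaky-bucket level: every shown
    // frame credits one frame's worth of channel bandwidth and debits the
    // bits it actually spent, while an ARF update (never displayed) only
    // debits. Minimal sketch of the update (illustrative only):
#if 0
    static int bits_off_target_update(int bits_off_target,
                                      int av_per_frame_bandwidth,
                                      int projected_frame_size,
                                      int is_alt_ref_update)
    {
        if (is_alt_ref_update)
            bits_off_target -= projected_frame_size;
        else
            bits_off_target += av_per_frame_bandwidth - projected_frame_size;

        return bits_off_target;   /* later copied into cpi->buffer_level */
    }
#endif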
if (cpi->common.refresh_alt_ref_frame) cpi->bits_off_target -= cpi->projected_frame_size; else cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size; // Rolling monitors of whether we are over or underspending used to help regulate min and Max Q in two pass. cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4; cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4; cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32; cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32; // Actual bits spent cpi->total_actual_bits += cpi->projected_frame_size; // Debug stats cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size); cpi->buffer_level = cpi->bits_off_target; // Update bits left to the kf and gf groups to account for overshoot or undershoot on these frames if (cm->frame_type == KEY_FRAME) { cpi->kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size; if (cpi->kf_group_bits < 0) cpi->kf_group_bits = 0 ; } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) { cpi->gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size; if (cpi->gf_group_bits < 0) cpi->gf_group_bits = 0 ; } if (cm->frame_type != KEY_FRAME) { if (cpi->common.refresh_alt_ref_frame) { cpi->last_skip_false_probs[2] = cpi->prob_skip_false; cpi->last_skip_probs_q[2] = cm->base_qindex; } else if (cpi->common.refresh_golden_frame) { cpi->last_skip_false_probs[1] = cpi->prob_skip_false; cpi->last_skip_probs_q[1] = cm->base_qindex; } else { cpi->last_skip_false_probs[0] = cpi->prob_skip_false; cpi->last_skip_probs_q[0] = cm->base_qindex; //update the baseline cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false; } } #if 0 && CONFIG_PSNR { FILE *f = fopen("tmp.stt", "a"); vp8_clear_system_state(); //__asm emms; if (cpi->total_coded_error_left != 0.0) fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %6ld %6ld" "%6ld %6ld %5ld %5ld %5ld %8ld %8.2f %10d %10.3f" "%10.3f %8ld\n", cpi->common.current_video_frame, cpi->this_frame_target, cpi->projected_frame_size, (cpi->projected_frame_size - cpi->this_frame_target), (int)cpi->total_target_vs_actual, (cpi->oxcf.starting_buffer_level-cpi->bits_off_target), (int)cpi->total_actual_bits, cm->base_qindex, cpi->active_best_quality, cpi->active_worst_quality, cpi->avg_frame_qindex, cpi->zbin_over_quant, cm->refresh_golden_frame, cm->refresh_alt_ref_frame, cm->frame_type, cpi->gfu_boost, cpi->est_max_qcorrection_factor, (int)cpi->bits_left, cpi->total_coded_error_left, (double)cpi->bits_left / cpi->total_coded_error_left, cpi->tot_recode_hits); else fprintf(f, "%10d %10d %10d %10d %10d %10d %10d %10d %6ld %6ld" "%6ld %6ld %5ld %5ld %5ld %8ld %8.2f %10d %10.3f" "%8ld\n", cpi->common.current_video_frame, cpi->this_frame_target, cpi->projected_frame_size, (cpi->projected_frame_size - cpi->this_frame_target), (int)cpi->total_target_vs_actual, (cpi->oxcf.starting_buffer_level-cpi->bits_off_target), (int)cpi->total_actual_bits, cm->base_qindex, cpi->active_best_quality, cpi->active_worst_quality, cpi->avg_frame_qindex, cpi->zbin_over_quant, cm->refresh_golden_frame, cm->refresh_alt_ref_frame, cm->frame_type, cpi->gfu_boost, cpi->est_max_qcorrection_factor, (int)cpi->bits_left, cpi->total_coded_error_left, cpi->tot_recode_hits); fclose(f); { FILE *fmodes = fopen("Modes.stt", "a"); int i; fprintf(fmodes, 
"%6d:%1d:%1d:%1d ", cpi->common.current_video_frame, cm->frame_type, cm->refresh_golden_frame, cm->refresh_alt_ref_frame); for (i = 0; i < MAX_MODES; i++) fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]); fprintf(fmodes, "\n"); fclose(fmodes); } } #endif // If this was a kf or Gf note the Q if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || cm->refresh_alt_ref_frame) cm->last_kf_gf_q = cm->base_qindex; if (cm->refresh_golden_frame == 1) cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN; else cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN; if (cm->refresh_alt_ref_frame == 1) cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF; else cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF; if (cm->refresh_last_frame & cm->refresh_golden_frame) // both refreshed cpi->gold_is_last = 1; else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other cpi->gold_is_last = 0; if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) // both refreshed cpi->alt_is_last = 1; else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) // 1 refreshed but not the other cpi->alt_is_last = 0; if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame) // both refreshed cpi->gold_is_alt = 1; else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) // 1 refreshed but not the other cpi->gold_is_alt = 0; cpi->ref_frame_flags = VP8_ALT_FLAG | VP8_GOLD_FLAG | VP8_LAST_FLAG; if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FLAG; if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALT_FLAG; if (cpi->gold_is_alt) cpi->ref_frame_flags &= ~VP8_ALT_FLAG; if (cpi->oxcf.error_resilient_mode) { // Is this an alternate reference update if (cpi->common.refresh_alt_ref_frame) vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->alt_fb_idx]); if (cpi->common.refresh_golden_frame) vp8_yv12_copy_frame_ptr(cm->frame_to_show, &cm->yv12_fb[cm->gld_fb_idx]); } else { if (cpi->oxcf.play_alternate && cpi->common.refresh_alt_ref_frame) // Update the alternate reference frame and stats as appropriate. update_alt_ref_frame_and_stats(cpi); else // Update the Golden frame and golden frame and stats as appropriate. update_golden_frame_and_stats(cpi); } if (cm->frame_type == KEY_FRAME) { // Tell the caller that the frame was coded as a key frame *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY; // As this frame is a key frame the next defaults to an inter frame. cm->frame_type = INTER_FRAME; cpi->last_frame_percent_intra = 100; } else { *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY; cpi->last_frame_percent_intra = cpi->this_frame_percent_intra; } // Clear the one shot update flags for segmentation map and mode/ref loop filter deltas. cpi->mb.e_mbd.update_mb_segmentation_map = 0; cpi->mb.e_mbd.update_mb_segmentation_data = 0; cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; // Dont increment frame counters if this was an altref buffer update not a real frame if (cm->show_frame) { cm->current_video_frame++; cpi->frames_since_key++; } // reset to normal state now that we are done. 
#if 0 { char filename[512]; FILE *recon_file; sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame); recon_file = fopen(filename, "wb"); fwrite(cm->yv12_fb[cm->lst_fb_idx].buffer_alloc, cm->yv12_fb[cm->lst_fb_idx].frame_size, 1, recon_file); fclose(recon_file); } #endif // DEBUG //vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); } int vp8_is_gf_update_needed(VP8_PTR ptr) { VP8_COMP *cpi = (VP8_COMP *) ptr; int ret_val; ret_val = cpi->gf_update_recommended; cpi->gf_update_recommended = 0; return ret_val; } void vp8_check_gf_quality(VP8_COMP *cpi) { VP8_COMMON *cm = &cpi->common; int gf_active_pct = (100 * cpi->gf_active_count) / (cm->mb_rows * cm->mb_cols); int gf_ref_usage_pct = (cpi->count_mb_ref_frame_usage[GOLDEN_FRAME] * 100) / (cm->mb_rows * cm->mb_cols); int last_ref_zz_useage = (cpi->inter_zz_count * 100) / (cm->mb_rows * cm->mb_cols); // Gf refresh is not currently being signalled if (cpi->gf_update_recommended == 0) { if (cpi->common.frames_since_golden > 7) { // Low use of gf if ((gf_active_pct < 10) || ((gf_active_pct + gf_ref_usage_pct) < 15)) { // ...but last frame zero zero usage is reasonbable so a new gf might be appropriate if (last_ref_zz_useage >= 25) { cpi->gf_bad_count ++; if (cpi->gf_bad_count >= 8) // Check that the condition is stable { cpi->gf_update_recommended = 1; cpi->gf_bad_count = 0; } } else cpi->gf_bad_count = 0; // Restart count as the background is not stable enough } else cpi->gf_bad_count = 0; // Gf useage has picked up so reset count } } // If the signal is set but has not been read should we cancel it. else if (last_ref_zz_useage < 15) { cpi->gf_update_recommended = 0; cpi->gf_bad_count = 0; } #if 0 { FILE *f = fopen("gfneeded.stt", "a"); fprintf(f, "%10d %10d %10d %10d %10ld \n", cm->current_video_frame, cpi->common.frames_since_golden, gf_active_pct, gf_ref_usage_pct, cpi->gf_update_recommended); fclose(f); } #endif } #if !(CONFIG_REALTIME_ONLY) static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags) { if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi); encode_frame_to_data_rate(cpi, size, dest, frame_flags); cpi->bits_left -= 8 * *size; if (!cpi->common.refresh_alt_ref_frame) { double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth *cpi->oxcf.two_pass_vbrmin_section / 100); cpi->bits_left += (long long)(two_pass_min_rate / cpi->oxcf.frame_rate); } } #endif //For ARM NEON, d8-d15 are callee-saved registers, and need to be saved by us. #if HAVE_ARMV7 extern void vp8_push_neon(INT64 *store); extern void vp8_pop_neon(INT64 *store); #endif int vp8_receive_raw_frame(VP8_PTR ptr, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, INT64 time_stamp, INT64 end_time) { INT64 store_reg[8]; VP8_COMP *cpi = (VP8_COMP *) ptr; VP8_COMMON *cm = &cpi->common; struct vpx_usec_timer timer; if (!cpi) return -1; #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_push_neon(store_reg); } #endif vpx_usec_timer_start(&timer); // no more room for frames; if (cpi->source_buffer_count != 0 && cpi->source_buffer_count >= cpi->oxcf.lag_in_frames) { #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_pop_neon(store_reg); } #endif return -1; } //printf("in-cpi->source_buffer_count: %d\n", cpi->source_buffer_count); cm->clr_type = sd->clrtype; // make a copy of the frame for use later... 
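// Note on the source-buffer ring: with lagged encoding enabled, raw frames
// go into src_buffer[], a ring of oxcf.lag_in_frames slots -- written at
// slot source_buffer_count while the ring is still filling, and thereafter
// one slot behind source_encode_index (wrapping at lag_in_frames).  Without
// lag, slot 0 is simply overwritten on every call.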
#if !(CONFIG_REALTIME_ONLY) if (cpi->oxcf.allow_lag) { int which_buffer = cpi->source_encode_index - 1; SOURCE_SAMPLE *s; if (which_buffer == -1) which_buffer = cpi->oxcf.lag_in_frames - 1; if (cpi->source_buffer_count < cpi->oxcf.lag_in_frames - 1) which_buffer = cpi->source_buffer_count; s = &cpi->src_buffer[which_buffer]; s->source_time_stamp = time_stamp; s->source_end_time_stamp = end_time; s->source_frame_flags = frame_flags; vp8_yv12_copy_frame_ptr(sd, &s->source_buffer); cpi->source_buffer_count ++; } else #endif { SOURCE_SAMPLE *s; s = &cpi->src_buffer[0]; s->source_end_time_stamp = end_time; s->source_time_stamp = time_stamp; s->source_frame_flags = frame_flags; #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_yv12_copy_src_frame_func_neon(sd, &s->source_buffer); } #if CONFIG_RUNTIME_CPU_DETECT else #endif #endif #if !HAVE_ARMV7 || CONFIG_RUNTIME_CPU_DETECT { vp8_yv12_copy_frame_ptr(sd, &s->source_buffer); } #endif cpi->source_buffer_count = 1; } vpx_usec_timer_mark(&timer); cpi->time_receive_data += vpx_usec_timer_elapsed(&timer); #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_pop_neon(store_reg); } #endif return 0; } int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, INT64 *time_stamp, INT64 *time_end, int flush) { INT64 store_reg[8]; VP8_COMP *cpi = (VP8_COMP *) ptr; VP8_COMMON *cm = &cpi->common; struct vpx_usec_timer tsctimer; struct vpx_usec_timer ticktimer; struct vpx_usec_timer cmptimer; if (!cpi) return -1; #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_push_neon(store_reg); } #endif vpx_usec_timer_start(&cmptimer); // flush variable tells us that even though we have less than 10 frames // in our buffer we need to start producing compressed frames. // Probably because we are at the end of a file.... if ((cpi->source_buffer_count == cpi->oxcf.lag_in_frames && cpi->oxcf.lag_in_frames > 0) || (!cpi->oxcf.allow_lag && cpi->source_buffer_count > 0) || (flush && cpi->source_buffer_count > 0)) { SOURCE_SAMPLE *s; s = &cpi->src_buffer[cpi->source_encode_index]; cpi->source_time_stamp = s->source_time_stamp; cpi->source_end_time_stamp = s->source_end_time_stamp; #if !(CONFIG_REALTIME_ONLY) // Should we code an alternate reference frame if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate && cpi->source_alt_ref_pending && (cpi->frames_till_gf_update_due < cpi->source_buffer_count) && cpi->oxcf.lag_in_frames != 0) { cpi->last_alt_ref_sei = (cpi->source_encode_index + cpi->frames_till_gf_update_due) % cpi->oxcf.lag_in_frames; #if VP8_TEMPORAL_ALT_REF if (cpi->oxcf.arnr_max_frames > 0) { #if 0 // my attempt at a loop that tests the results of strength filter. 
int start_frame = cpi->last_alt_ref_sei - 3;
int i, besti = -1, pastin = cpi->oxcf.arnr_strength;
int besterr;

if (start_frame < 0)
    start_frame += cpi->oxcf.lag_in_frames;

besterr = vp8_calc_low_ss_err(&cpi->src_buffer[cpi->last_alt_ref_sei].source_buffer,
                              &cpi->src_buffer[start_frame].source_buffer,
                              IF_RTCD(&cpi->rtcd.variance));

for (i = 0; i < 7; i++)
{
    int thiserr;
    cpi->oxcf.arnr_strength = i;
    vp8cx_temp_filter_c(cpi);

    thiserr = vp8_calc_low_ss_err(&cpi->alt_ref_buffer.source_buffer,
                                  &cpi->src_buffer[start_frame].source_buffer,
                                  IF_RTCD(&cpi->rtcd.variance));

    if (10 * thiserr < besterr * 8)
    {
        besterr = thiserr;
        besti = i;
    }
}

if (besti != -1)
{
    cpi->oxcf.arnr_strength = besti;
    vp8cx_temp_filter_c(cpi);
    s = &cpi->alt_ref_buffer;

    // FWG not sure if I need to copy this data for the Alt Ref frame
    s->source_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_time_stamp;
    s->source_end_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_end_time_stamp;
    s->source_frame_flags = cpi->src_buffer[cpi->last_alt_ref_sei].source_frame_flags;
}
else
    s = &cpi->src_buffer[cpi->last_alt_ref_sei];

#else
vp8cx_temp_filter_c(cpi);
s = &cpi->alt_ref_buffer;

// FWG not sure if I need to copy this data for the Alt Ref frame
s->source_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_time_stamp;
s->source_end_time_stamp = cpi->src_buffer[cpi->last_alt_ref_sei].source_end_time_stamp;
s->source_frame_flags = cpi->src_buffer[cpi->last_alt_ref_sei].source_frame_flags;
#endif
}
else
#endif
    s = &cpi->src_buffer[cpi->last_alt_ref_sei];

cm->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due;
cm->refresh_alt_ref_frame = 1;
cm->refresh_golden_frame = 0;
cm->refresh_last_frame = 0;
cm->show_frame = 0;
cpi->source_alt_ref_pending = FALSE;  // Clear pending alt-ref flag.
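// Note: the alt-ref update is coded as an invisible frame -- show_frame is 0
// and only the alt-ref buffer is refreshed -- so the display-frame counters
// are deliberately not advanced for it (see the cm->show_frame guard at the
// end of encode_frame_to_data_rate).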
cpi->is_src_frame_alt_ref = 0; cpi->is_next_src_alt_ref = 0; } else #endif { cm->show_frame = 1; #if !(CONFIG_REALTIME_ONLY) if (cpi->oxcf.allow_lag) { if (cpi->source_encode_index == cpi->last_alt_ref_sei) { cpi->is_src_frame_alt_ref = 1; cpi->last_alt_ref_sei = -1; } else cpi->is_src_frame_alt_ref = 0; cpi->source_encode_index = (cpi->source_encode_index + 1) % cpi->oxcf.lag_in_frames; if(cpi->source_encode_index == cpi->last_alt_ref_sei) cpi->is_next_src_alt_ref = 1; else cpi->is_next_src_alt_ref = 0; } #endif cpi->source_buffer_count--; } cpi->un_scaled_source = &s->source_buffer; cpi->Source = &s->source_buffer; cpi->source_frame_flags = s->source_frame_flags; *time_stamp = cpi->source_time_stamp; *time_end = cpi->source_end_time_stamp; } else { *size = 0; #if !(CONFIG_REALTIME_ONLY) if (flush && cpi->pass == 1 && !cpi->first_pass_done) { vp8_end_first_pass(cpi); /* get last stats packet */ cpi->first_pass_done = 1; } #endif #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_pop_neon(store_reg); } #endif return -1; } *frame_flags = cpi->source_frame_flags; #if CONFIG_PSNR if (cpi->source_time_stamp < cpi->first_time_stamp_ever) cpi->first_time_stamp_ever = cpi->source_time_stamp; #endif // adjust frame rates based on timestamps given if (!cm->refresh_alt_ref_frame) { if (cpi->last_time_stamp_seen == 0) { double this_fps = 10000000.000 / (cpi->source_end_time_stamp - cpi->source_time_stamp); vp8_new_frame_rate(cpi, this_fps); } else { long long nanosecs = cpi->source_time_stamp - cpi->last_time_stamp_seen; double this_fps = 10000000.000 / nanosecs; vp8_new_frame_rate(cpi, (7 * cpi->oxcf.frame_rate + this_fps) / 8); } cpi->last_time_stamp_seen = cpi->source_time_stamp; } if (cpi->compressor_speed == 2) { vp8_check_gf_quality(cpi); } if (!cpi) { #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_pop_neon(store_reg); } #endif return 0; } if (cpi->compressor_speed == 2) { vpx_usec_timer_start(&tsctimer); vpx_usec_timer_start(&ticktimer); } // start with a 0 size frame *size = 0; // Clear down mmx registers vp8_clear_system_state(); //__asm emms; cm->frame_type = INTER_FRAME; cm->frame_flags = *frame_flags; #if 0 if (cm->refresh_alt_ref_frame) { //cm->refresh_golden_frame = 1; cm->refresh_golden_frame = 0; cm->refresh_last_frame = 0; } else { cm->refresh_golden_frame = 0; cm->refresh_last_frame = 1; } #endif #if !(CONFIG_REALTIME_ONLY) if (cpi->pass == 1) { Pass1Encode(cpi, size, dest, frame_flags); } else if (cpi->pass == 2) { Pass2Encode(cpi, size, dest, frame_flags); } else #endif encode_frame_to_data_rate(cpi, size, dest, frame_flags); if (cpi->compressor_speed == 2) { unsigned int duration, duration2; vpx_usec_timer_mark(&tsctimer); vpx_usec_timer_mark(&ticktimer); duration = vpx_usec_timer_elapsed(&ticktimer); duration2 = (unsigned int)((double)duration / 2); if (cm->frame_type != KEY_FRAME) { if (cpi->avg_encode_time == 0) cpi->avg_encode_time = duration; else cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3; } if (duration2) { //if(*frame_flags!=1) { if (cpi->avg_pick_mode_time == 0) cpi->avg_pick_mode_time = duration2; else cpi->avg_pick_mode_time = (7 * cpi->avg_pick_mode_time + duration2) >> 3; } } } if (cm->refresh_entropy_probs == 0) { vpx_memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc)); } // if its a dropped frame honor the requests on subsequent frames if (*size > 0) { // return to normal state cm->refresh_entropy_probs = 1; cm->refresh_alt_ref_frame = 0; cm->refresh_golden_frame = 0; 
cm->refresh_last_frame = 1; cm->frame_type = INTER_FRAME; } cpi->ready_for_new_frame = 1; vpx_usec_timer_mark(&cmptimer); cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer); if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) generate_psnr_packet(cpi); #if CONFIG_PSNR if (cpi->pass != 1) { cpi->bytes += *size; if (cm->show_frame) { cpi->count ++; if (cpi->b_calculate_psnr) { double y, u, v; double sq_error; double frame_psnr = vp8_calc_psnr(cpi->Source, cm->frame_to_show, &y, &u, &v, &sq_error); cpi->total_y += y; cpi->total_u += u; cpi->total_v += v; cpi->total_sq_error += sq_error; cpi->total += frame_psnr; { double y2, u2, v2, frame_psnr2, frame_ssim2 = 0; double weight = 0; vp8_deblock(cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0, IF_RTCD(&cm->rtcd.postproc)); vp8_clear_system_state(); frame_psnr2 = vp8_calc_psnr(cpi->Source, &cm->post_proc_buffer, &y2, &u2, &v2, &sq_error); frame_ssim2 = vp8_calc_ssim(cpi->Source, &cm->post_proc_buffer, 1, &weight); cpi->summed_quality += frame_ssim2 * weight; cpi->summed_weights += weight; cpi->totalp_y += y2; cpi->totalp_u += u2; cpi->totalp_v += v2; cpi->totalp += frame_psnr2; cpi->total_sq_error2 += sq_error; } } if (cpi->b_calculate_ssimg) { double y, u, v, frame_all; frame_all = vp8_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v); cpi->total_ssimg_y += y; cpi->total_ssimg_u += u; cpi->total_ssimg_v += v; cpi->total_ssimg_all += frame_all; } } } #if 0 if (cpi->common.frame_type != 0 && cpi->common.base_qindex == cpi->oxcf.worst_allowed_q) { skiptruecount += cpi->skip_true_count; skipfalsecount += cpi->skip_false_count; } #endif #if 0 if (cpi->pass != 1) { FILE *f = fopen("skip.stt", "a"); fprintf(f, "frame:%4d flags:%4x Q:%4d P:%4d Size:%5d\n", cpi->common.current_video_frame, *frame_flags, cpi->common.base_qindex, cpi->prob_skip_false, *size); if (cpi->is_src_frame_alt_ref == 1) fprintf(f, "skipcount: %4d framesize: %d\n", cpi->skip_true_count , *size); fclose(f); } #endif #endif #if HAVE_ARMV7 #if CONFIG_RUNTIME_CPU_DETECT if (cm->rtcd.flags & HAS_NEON) #endif { vp8_pop_neon(store_reg); } #endif return 0; } int vp8_get_preview_raw_frame(VP8_PTR comp, YV12_BUFFER_CONFIG *dest, int deblock_level, int noise_level, int flags) { VP8_COMP *cpi = (VP8_COMP *) comp; if (cpi->common.refresh_alt_ref_frame) return -1; else { int ret; #if CONFIG_POSTPROC ret = vp8_post_proc_frame(&cpi->common, dest, deblock_level, noise_level, flags); #else if (cpi->common.frame_to_show) { *dest = *cpi->common.frame_to_show; dest->y_width = cpi->common.Width; dest->y_height = cpi->common.Height; dest->uv_height = cpi->common.Height / 2; ret = 0; } else { ret = -1; } #endif //!CONFIG_POSTPROC vp8_clear_system_state(); return ret; } } int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]) { VP8_COMP *cpi = (VP8_COMP *) comp; signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols) return -1; if (!map) { disable_segmentation((VP8_PTR)cpi); return 0; } // Set the segmentation Map set_segmentation_map((VP8_PTR)cpi, map); // Activate segmentation. 
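// Note on the ROI mapping: each of the four segment ids in the caller's map
// carries its own quantizer delta (MB_LVL_ALT_Q), loop-filter delta
// (MB_LVL_ALT_LF) and encode-breakout threshold; the deltas are applied
// relative to the frame-level values because SEGMENT_DELTADATA is selected
// when the feature data is installed below.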
enable_segmentation((VP8_PTR)cpi); // Set up the quant segment data feature_data[MB_LVL_ALT_Q][0] = delta_q[0]; feature_data[MB_LVL_ALT_Q][1] = delta_q[1]; feature_data[MB_LVL_ALT_Q][2] = delta_q[2]; feature_data[MB_LVL_ALT_Q][3] = delta_q[3]; // Set up the loop segment data s feature_data[MB_LVL_ALT_LF][0] = delta_lf[0]; feature_data[MB_LVL_ALT_LF][1] = delta_lf[1]; feature_data[MB_LVL_ALT_LF][2] = delta_lf[2]; feature_data[MB_LVL_ALT_LF][3] = delta_lf[3]; cpi->segment_encode_breakout[0] = threshold[0]; cpi->segment_encode_breakout[1] = threshold[1]; cpi->segment_encode_breakout[2] = threshold[2]; cpi->segment_encode_breakout[3] = threshold[3]; // Initialise the feature data structure // SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1 set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA); return 0; } int vp8_set_active_map(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned int cols) { VP8_COMP *cpi = (VP8_COMP *) comp; if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) { if (map) { vpx_memcpy(cpi->active_map, map, rows * cols); cpi->active_map_enabled = 1; } else cpi->active_map_enabled = 0; return 0; } else { //cpi->active_map_enabled = 0; return -1 ; } } int vp8_set_internal_size(VP8_PTR comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode) { VP8_COMP *cpi = (VP8_COMP *) comp; if (horiz_mode >= NORMAL && horiz_mode <= ONETWO) cpi->common.horiz_scale = horiz_mode; else return -1; if (vert_mode >= NORMAL && vert_mode <= ONETWO) cpi->common.vert_scale = vert_mode; else return -1; return 0; } int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd) { int i, j; int Total = 0; unsigned char *src = source->y_buffer; unsigned char *dst = dest->y_buffer; (void)rtcd; // Loop through the Y plane raw and reconstruction data summing (square differences) for (i = 0; i < source->y_height; i += 16) { for (j = 0; j < source->y_width; j += 16) { unsigned int sse; Total += VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse); } src += 16 * source->y_stride; dst += 16 * dest->y_stride; } return Total; } int vp8_calc_low_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, const vp8_variance_rtcd_vtable_t *rtcd) { int i, j; int Total = 0; unsigned char *src = source->y_buffer; unsigned char *dst = dest->y_buffer; (void)rtcd; // Loop through the Y plane raw and reconstruction data summing (square differences) for (i = 0; i < source->y_height; i += 16) { for (j = 0; j < source->y_width; j += 16) { unsigned int sse; VARIANCE_INVOKE(rtcd, mse16x16)(src + j, source->y_stride, dst + j, dest->y_stride, &sse); if (sse < 8096) Total += sse; } src += 16 * source->y_stride; dst += 16 * dest->y_stride; } return Total; } int vp8_get_speed(VP8_PTR c) { VP8_COMP *cpi = (VP8_COMP *) c; return cpi->Speed; } int vp8_get_quantizer(VP8_PTR c) { VP8_COMP *cpi = (VP8_COMP *) c; return cpi->common.base_qindex; }
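/*
 * Clarifying sketch (not part of libvpx): a plain-C reference for the
 * 16x16-block luma SSE walk used by vp8_calc_ss_err above, with the RTCD
 * dispatch replaced by a scalar loop.  sse16x16_ref and calc_ss_err_ref are
 * hypothetical names; the buffer layout (y_buffer / y_stride / y_width /
 * y_height) is the YV12_BUFFER_CONFIG one already used above, and like the
 * original it relies on the allocated frame borders when the dimensions are
 * not multiples of 16.
 */
static unsigned int sse16x16_ref(const unsigned char *src, int src_stride,
                                 const unsigned char *dst, int dst_stride)
{
    unsigned int sse = 0;
    int r, c;

    for (r = 0; r < 16; r++)
    {
        for (c = 0; c < 16; c++)
        {
            int diff = (int) src[c] - (int) dst[c];
            sse += (unsigned int)(diff * diff);
        }

        src += src_stride;
        dst += dst_stride;
    }

    return sse;
}

static int calc_ss_err_ref(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest)
{
    int i, j;
    int Total = 0;
    unsigned char *src = source->y_buffer;
    unsigned char *dst = dest->y_buffer;

    // Sum per-block squared error over the Y plane, exactly as the
    // dispatched version does.
    for (i = 0; i < source->y_height; i += 16)
    {
        for (j = 0; j < source->y_width; j += 16)
            Total += (int) sse16x16_ref(src + j, source->y_stride,
                                        dst + j, dest->y_stride);

        src += 16 * source->y_stride;
        dst += 16 * dest->y_stride;
    }

    return Total;
}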
vertex_miner.h
#ifndef VERTEX_MINER_H #define VERTEX_MINER_H #include "miner.h" typedef std::unordered_map<BaseEmbedding, Frequency> SimpleMap; typedef QuickPattern<EdgeInducedEmbedding<StructuralElement>, StructuralElement> StrQPattern; // structural quick pattern typedef CanonicalGraph<EdgeInducedEmbedding<StructuralElement>, StructuralElement> StrCPattern; // structural canonical pattern typedef std::unordered_map<StrQPattern, Frequency> StrQpMapFreq; // mapping structural quick pattern to its frequency typedef std::unordered_map<StrCPattern, Frequency> StrCgMapFreq; // mapping structural canonical pattern to its frequency typedef PerThreadStorage<StrQpMapFreq> LocalStrQpMapFreq; typedef PerThreadStorage<StrCgMapFreq> LocalStrCgMapFreq; class VertexMiner : public Miner { public: VertexMiner(Graph *g, unsigned size = 3, int nthreads = 1) { graph = g; max_size = size; degree_counting(); numThreads = nthreads; } virtual ~VertexMiner() {} // extension for vertex-induced motif inline void extend_vertex(unsigned level, EmbeddingList& emb_list) { UintList num_new_emb(emb_list.size()); #pragma omp parallel for for (size_t pos = 0; pos < emb_list.size(); pos ++) { VertexEmbedding emb(level+1); get_embedding<VertexEmbedding>(level, pos, emb_list, emb); num_new_emb[pos] = 0; unsigned n = emb.size(); for (unsigned i = 0; i < n; ++i) { VertexId src = emb.get_vertex(i); //for (auto e : graph->edges(src)) { IndexT row_begin = graph->edge_begin(src); IndexT row_end = graph->edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); if (!is_vertexInduced_automorphism(emb, i, src, dst)) { num_new_emb[pos] ++; } } } } UintList indices = parallel_prefix_sum<unsigned>(num_new_emb); num_new_emb.clear(); auto new_size = indices.back(); assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32 std::cout << "number of new embeddings: " << new_size << "\n"; emb_list.add_level(new_size); #ifdef USE_WEDGE if (level == 1 && max_size == 4) { is_wedge.resize(emb_list.size()); std::fill(is_wedge.begin(), is_wedge.end(), 0); } #endif for (size_t pos = 0; pos < emb_list.size(level); pos ++) { VertexEmbedding emb(level+1); get_embedding<VertexEmbedding>(level, pos, emb_list, emb); auto start = indices[pos]; auto n = emb.size(); for (unsigned i = 0; i < n; ++i) { VertexId src = emb.get_vertex(i); //for (auto e : graph->edges(src)) { IndexT row_begin = graph->edge_begin(src); IndexT row_end = graph->edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); if (!is_vertexInduced_automorphism(emb, i, src, dst)) { assert(start < indices.back()); if (n == 2 && max_size == 4) emb_list.set_pid(start, find_motif_pattern_id(n, i, dst, emb, start)); emb_list.set_idx(level+1, start, pos); emb_list.set_vid(level+1, start++, dst); } } } } indices.clear(); } // extension for vertex-induced clique inline void extend_vertex(unsigned level, EmbeddingList& emb_list, Accumulator<uint64_t> &num) { UintList num_new_emb(emb_list.size()); #pragma omp parallel for for (size_t pos = 0; pos < emb_list.size(); pos ++) { BaseEmbedding emb(level+1); get_embedding<BaseEmbedding>(level, pos, emb_list, emb); VertexId vid = emb_list.get_vid(level, pos); num_new_emb[pos] = 0; //std::cout << "current embeddings: " << emb << "\n"; //for (auto e : graph->edges(vid)) { IndexT row_begin = graph->edge_begin(vid); IndexT row_end = graph->edge_end(vid); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); //printf("Examine edge(%d,%d)\n", 
vid, dst); if (is_all_connected_dag(dst, emb, level)) { //std::cout << "clique found\n"; if (level < max_size-2) num_new_emb[pos] ++; else num += 1; } } } //std::cout << "number of cliques: " << num.reduce() << "\n"; if (level == max_size-2) return; UintList indices = parallel_prefix_sum<unsigned>(num_new_emb); num_new_emb.clear(); auto new_size = indices.back(); assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32 std::cout << "number of new embeddings: " << new_size << "\n"; emb_list.add_level(new_size); #pragma omp parallel for for (size_t pos = 0; pos < emb_list.size(level); pos ++) { BaseEmbedding emb(level+1); get_embedding<BaseEmbedding>(level, pos, emb_list, emb); VertexId vid = emb_list.get_vid(level, pos); unsigned start = indices[pos]; //for (auto e : graph->edges(vid)) { IndexT row_begin = graph->edge_begin(vid); IndexT row_end = graph->edge_end(vid); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); if (is_all_connected_dag(dst, emb, level)) { emb_list.set_idx(level+1, start, pos); emb_list.set_vid(level+1, start++, dst); } } } indices.clear(); } inline void aggregate(unsigned level, EmbeddingList& emb_list, std::vector<UlongAccu> &accumulators) { #pragma omp parallel for for (size_t pos = 0; pos < emb_list.size(); pos ++) { VertexEmbedding emb(level+1); get_embedding<VertexEmbedding>(level, pos, emb_list, emb); unsigned n = emb.size(); if (n == 3) emb.set_pid(emb_list.get_pid(pos)); for (unsigned i = 0; i < n; ++i) { VertexId src = emb.get_vertex(i); //for (auto e : graph->edges(src)) { IndexT row_begin = graph->edge_begin(src); IndexT row_end = graph->edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); if (!is_vertexInduced_automorphism(emb, i, src, dst)) { assert(n < 4); unsigned pid = find_motif_pattern_id(n, i, dst, emb, pos); assert(pid < accumulators.size()); //printf("pid = %u\n", pid); accumulators[pid] += 1; } } } emb.clean(); } } inline void quick_aggregate(unsigned level, const EmbeddingList& emb_list) { for (auto i = 0; i < numThreads; i++) qp_localmaps.getLocal(i)->clear(); #pragma omp parallel for for (size_t pos = 0; pos < emb_list.size(level); pos ++) { int tid = omp_get_thread_num(); StrQpMapFreq* qp_map = qp_localmaps.getLocal(tid); VertexEmbedding emb(level+1); get_embedding<VertexEmbedding>(level, pos, emb_list, emb); unsigned n = emb.size(); for (unsigned i = 0; i < n; ++i) { VertexId src = emb.get_vertex(i); //for (auto e : graph->edges(src)) { IndexT row_begin = graph->edge_begin(src); IndexT row_end = graph->edge_end(src); for (IndexT e = row_begin; e < row_end; e++) { IndexT dst = graph->getEdgeDst(e); if (!is_vertexInduced_automorphism(emb, i, src, dst)) { std::vector<bool> connected; get_connectivity(n, i, dst, emb, connected); StrQPattern qp(n+1, connected); if (qp_map->find(qp) != qp_map->end()) { (*qp_map)[qp] += 1; qp.clean(); } else (*qp_map)[qp] = 1; } } } emb.clean(); } } // canonical pattern aggregation inline void canonical_aggregate() { for (auto i = 0; i < numThreads; i++) cg_localmaps.getLocal(i)->clear(); //auto it = qp_map.begin(); //#pragma omp parallel for /* for (auto i = 0; i < qp_map.size(); i++) { StrCgMapFreq* cg_map = cg_localmaps.getLocal(i); StrCPattern cg(it->first); if (cg_map->find(cg) != cg_map->end()) (*cg_map)[cg] += it->second; else (*cg_map)[cg] = it->second; cg.clean(); } qp_map.clear(); */ } inline void merge_qp_map() { qp_map.clear(); for (unsigned i = 0; i < qp_localmaps.size(); i++) { StrQpMapFreq 
qp_lmap = *qp_localmaps.getLocal(i); for (auto element : qp_lmap) { if (qp_map.find(element.first) != qp_map.end()) qp_map[element.first] += element.second; else qp_map[element.first] = element.second; } } } inline void merge_cg_map() { cg_map.clear(); for (unsigned i = 0; i < cg_localmaps.size(); i++) { StrCgMapFreq cg_lmap = *cg_localmaps.getLocal(i); for (auto element : cg_lmap) { if (cg_map.find(element.first) != cg_map.end()) cg_map[element.first] += element.second; else cg_map[element.first] = element.second; } } } // Utilities void printout_motifs(std::vector<UlongAccu> &accumulators) { std::cout << std::endl; if (accumulators.size() == 2) { std::cout << "\ttriangles\t" << accumulators[0].reduce() << std::endl; std::cout << "\t3-chains\t" << accumulators[1].reduce() << std::endl; } else if (accumulators.size() == 6) { std::cout << "\t4-paths --> " << accumulators[0].reduce() << std::endl; std::cout << "\t3-stars --> " << accumulators[1].reduce() << std::endl; std::cout << "\t4-cycles --> " << accumulators[2].reduce() << std::endl; std::cout << "\ttailed-triangles --> " << accumulators[3].reduce() << std::endl; std::cout << "\tdiamonds --> " << accumulators[4].reduce() << std::endl; std::cout << "\t4-cliques --> " << accumulators[5].reduce() << std::endl; } else { std::cout << "\ttoo many patterns to show\n"; } std::cout << std::endl; } void printout_motifs(UintMap &p_map) { assert(p_map.size() == 21); std::cout << std::endl; for (auto it = p_map.begin(); it != p_map.end(); ++it) std::cout << "{" << it->first << "} --> " << it->second << std::endl; std::cout << std::endl; } void printout_motifs() { std::cout << std::endl; for (auto it = cg_map.begin(); it != cg_map.end(); ++it) std::cout << it->first << " --> " << it->second << std::endl; std::cout << std::endl; } private: //unsigned num_cliques; unsigned max_size; int numThreads; std::vector<unsigned> is_wedge; // indicate a 3-vertex embedding is a wedge or chain (v0-cntered or v1-centered) StrQpMapFreq qp_map; // quick patterns map for counting the frequency StrCgMapFreq cg_map; // canonical graph map for couting the frequency LocalStrQpMapFreq qp_localmaps; // quick patterns local map for each thread LocalStrCgMapFreq cg_localmaps; // canonical graph local map for each thread template <typename EmbeddingTy> inline void get_embedding(unsigned level, unsigned pos, const EmbeddingList& emb_list, EmbeddingTy &emb) { VertexId vid = emb_list.get_vid(level, pos); IndexTy idx = emb_list.get_idx(level, pos); ElementType ele(vid); emb.set_element(level, ele); // backward constructing the embedding for (unsigned l = 1; l < level; l ++) { VertexId u = emb_list.get_vid(level-l, idx); ElementType ele(u); emb.set_element(level-l, ele); idx = emb_list.get_idx(level-l, idx); } ElementType ele0(idx); emb.set_element(0, ele0); } inline bool is_vertexInduced_automorphism(const VertexEmbedding& emb, unsigned idx, VertexId src, VertexId dst) { unsigned n = emb.size(); // the new vertex id should be larger than the first vertex id if (dst <= emb.get_vertex(0)) return true; // the new vertex should not already exist in the embedding for (unsigned i = 1; i < n; ++i) if (dst == emb.get_vertex(i)) return true; // the new vertex should not already be extended by any previous vertex in the embedding for (unsigned i = 0; i < idx; ++i) if (is_connected(emb.get_vertex(i), dst)) return true; // the new vertex id should be larger than any vertex id after its source vertex in the embedding for (unsigned i = idx+1; i < n; ++i) if (dst < emb.get_vertex(i)) return 
true; return false; } inline unsigned find_motif_pattern_id(unsigned n, unsigned idx, VertexId dst, const VertexEmbedding& emb, unsigned pos = 0) { unsigned pid = 0; if (n == 2) { // count 3-motifs pid = 1; // 3-chain if (idx == 0) { if (is_connected(emb.get_vertex(1), dst)) pid = 0; // triangle #ifdef USE_WEDGE else if (max_size == 4) is_wedge[pos] = 1; // wedge; used for 4-motif #endif } } else if (n == 3) { // count 4-motifs unsigned num_edges = 1; pid = emb.get_pid(); if (pid == 0) { // extending a triangle for (unsigned j = idx+1; j < n; j ++) if (is_connected(emb.get_vertex(j), dst)) num_edges ++; pid = num_edges + 2; // p3: tailed-triangle; p4: diamond; p5: 4-clique } else { // extending a 3-chain assert(pid == 1); std::vector<bool> connected(3, false); connected[idx] = true; for (unsigned j = idx+1; j < n; j ++) { if (is_connected(emb.get_vertex(j), dst)) { num_edges ++; connected[j] = true; } } if (num_edges == 1) { pid = 0; // p0: 3-path unsigned center = 1; #ifdef USE_WEDGE if (is_wedge[pos]) center = 0; #else center = is_connected(emb.get_vertex(1), emb.get_vertex(2)) ? 1 : 0; #endif if (idx == center) pid = 1; // p1: 3-star } else if (num_edges == 2) { pid = 2; // p2: 4-cycle unsigned center = 1; #ifdef USE_WEDGE if (is_wedge[pos]) center = 0; #else center = is_connected(emb.get_vertex(1), emb.get_vertex(2)) ? 1 : 0; #endif if (connected[center]) pid = 3; // p3: tailed-triangle } else { pid = 4; // p4: diamond } } } else { // count 5-motif and beyond std::vector<bool> connected; get_connectivity(n, idx, dst, emb, connected); Matrix A(n+1, std::vector<MatType>(n+1, 0)); gen_adj_matrix(n+1, connected, A); std::vector<MatType> c(n+1, 0); char_polynomial(n+1, A, c); bliss::UintSeqHash h; for (unsigned i = 0; i < n+1; ++i) h.update((unsigned)c[i]); pid = h.get_value(); } return pid; } }; #endif // VERTEX_MINER_HPP_
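/*
 * Clarifying sketch (not part of the miner): both extend_vertex overloads
 * above follow a count / prefix-sum / fill pattern -- one parallel pass
 * counts the extensions each embedding produces, a prefix sum turns those
 * counts into write offsets plus a total, and a second pass fills the
 * preallocated level.  prefix_sum_ref is a hypothetical serial stand-in for
 * the parallel_prefix_sum contract assumed there: out[pos] is embedding
 * pos's first write offset and out[n] is the new level size, so every
 * embedding writes its range out[pos] .. out[pos+1]-1 without
 * synchronization.
 */
static void prefix_sum_ref(const unsigned *counts, unsigned n, unsigned *out)
{
    unsigned i;
    out[0] = 0;
    for (i = 0; i < n; i++)
        out[i + 1] = out[i] + counts[i]; /* exclusive scan, total in out[n] */
}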
test.c
#include <stdio.h> #include "../utilities/check.h" #define N 100 #pragma omp declare target #pragma omp declare simd int foo(int k) { return k+1; } #pragma omp declare simd simdlen(16) int foo_simdlen(int k) { return k+1; } #pragma omp declare simd linear(b:1) int foo_linear(int *b) { return *b+1; } #pragma omp declare simd aligned(b:8) int foo_aligned(int *b) { return *b+1; } #pragma omp declare simd linear(b:1) uniform(c) int foo_uniform(int *b, int c) { return *b+c; } #pragma omp declare simd linear(b:1) uniform(c) inbranch int foo_inbranch(int *b, int c) { return *b+c; } #pragma omp declare simd linear(b:1) uniform(c) notinbranch int foo_notinbranch(int *b, int c) { return *b+c; } #pragma omp end declare target int main() { check_offloading(); int a[N], aa[N], b[N]; int i, fail = 0; /// Test: no clauses // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; // offload #pragma omp target map(tofrom: a[0:100]) { int k; #pragma omp simd for(k=0; k<N; k++) a[k] = foo(k); } // host for(i=0; i<N; i++) aa[i] = i+1; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) { printf("%d: a %d != %d\n", i, a[i], aa[i]); fail = 1; } } // report if (fail) printf("failed\n"); else printf("success\n"); /// Test: simdlen fail = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; // offload #pragma omp target map(tofrom: a[0:100]) { int k; #pragma omp simd for(k=0; k<N; k++) a[k] = foo_simdlen(k); } // host for(i=0; i<N; i++) aa[i] = i+1; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) { printf("%d: a %d != %d\n", i, a[i], aa[i]); fail = 1; } } // report if (fail) printf("failed\n"); else printf("success\n"); /// Test: linear fail = 0; // initialize for(i=0; i<N; i++) { aa[i] = a[i] = -1; b[i] = i; } // offload #pragma omp target map(tofrom: a[0:100]) map(to:b[:100]) { int k; #pragma omp simd for(k=0; k<N; k++) { a[k] += foo_linear(&b[k]); // -1 += i } } // host for(i=0; i<N; i++) aa[i] = i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) { printf("%d: a %d != %d\n", i, a[i], aa[i]); fail = 1; } } // report if (fail) printf("failed\n"); else printf("success\n"); /// Test: aligned fail = 0; // initialize for(i=0; i<N; i++) { aa[i] = a[i] = -1; b[i] = i; } // offload #pragma omp target map(tofrom: a[0:100]) map(to:b[:100]) { int k; #pragma omp simd for(k=0; k<N; k++) { a[k] += foo_aligned(&b[k]); // -1 += i } } // host for(i=0; i<N; i++) aa[i] = i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) { printf("%d: a %d != %d\n", i, a[i], aa[i]); fail = 1; } } // report if (fail) printf("failed\n"); else printf("success\n"); /// Test: uniform fail = 0; // initialize for(i=0; i<N; i++) { aa[i] = a[i] = -1; b[i] = i; } // offload #pragma omp target map(tofrom: a[0:100]) map(to:b[:100]) { int k; int c = 3; #pragma omp simd for(k=0; k<N; k++) { a[k] += foo_uniform(&b[k],c); // -1 += i } } // host for(i=0; i<N; i++) aa[i] = i + 2; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) { printf("%d: a %d != %d\n", i, a[i], aa[i]); fail = 1; } } // report if (fail) printf("failed\n"); else printf("success\n"); /// Test: inbranch fail = 0; // initialize for(i=0; i<N; i++) { aa[i] = a[i] = -1; b[i] = i; } // offload #pragma omp target map(tofrom: a[0:100]) map(to:b[:100]) { int k; int c = 3; #pragma omp simd for(k=0; k<N; k++) { if (k%2 == 0) a[k] += foo_inbranch(&b[k],c); // -1 += i } } // host for(i=0; i<N; i++) if (i%2 == 0) aa[i] = i + 2; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) { printf("%d: a %d != %d\n", i, a[i], aa[i]); fail = 1; } } // report if (fail) printf("failed\n"); else printf("success\n"); /// Test: 
notinbranch fail = 0; // initialize for(i=0; i<N; i++) { aa[i] = a[i] = -1; b[i] = i; } // offload #pragma omp target map(tofrom: a[0:100]) map(to:b[:100]) { int k; int c = 3; #pragma omp simd for(k=0; k<N; k++) { a[k] += foo_notinbranch(&b[k],c); // -1 += i } } // host for(i=0; i<N; i++) aa[i] = i + 2; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) { printf("%d: a %d != %d\n", i, a[i], aa[i]); fail = 1; } } // report if (fail) printf("failed\n"); else printf("success\n"); return 0; }
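/*
 * Clarifying sketch (hypothetical, not part of the suite): every test above
 * repeats the same compare-and-report step; a shared helper in the same
 * style would look like this, with a[] holding the device results and aa[]
 * the host reference.
 */
static int check_result(const int *a, const int *aa, int n)
{
    int i, fail = 0;

    for (i = 0; i < n; i++) {
        if (a[i] != aa[i]) {
            printf("%d: a %d != %d\n", i, a[i], aa[i]);
            fail = 1;
        }
    }

    // Same pass/fail lines the tests above emit.
    if (fail) printf("failed\n");
    else printf("success\n");
    return fail;
}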
routines.c
#include "matrix.h" void scalar_multiply(struct COO matrix, double scalar) { int i; if (matrix.type == TYPE_INT) { #pragma omp parallel for shared(matrix,scalar) num_threads(param.threads) for (i = 0; i < matrix.count; i++) { matrix.elements[i].value.f = (double) matrix.elements[i].value.i * scalar; } } else { #pragma omp parallel for shared(matrix,scalar) num_threads(param.threads) for (i = 0; i < matrix.count; i++) { matrix.elements[i].value.f *= scalar; } } } int trace(struct CSR matrix) { int trace = 0; int i; #pragma omp parallel for reduction(+:trace) shared(matrix) num_threads(param.threads) for (i = 1; i < matrix.rows + 1; i++) { // Check num of elements in row i int elements = matrix.ia[i] - matrix.ia[i-1]; if (elements == 0) { continue; } int pos = matrix.ia[i-1]; // Starting index for ja/nnz while (pos - matrix.ia[i-1] <= elements) { if (matrix.ja[pos] == i - 1) { trace += matrix.nnz.i[pos]; break; } else if (matrix.ja[pos] > i - 1) { break; } pos++; } } return trace; } double trace_f(struct CSR matrix) { double trace = 0.0; int i; #pragma omp parallel for reduction(+:trace) shared(matrix) num_threads(param.threads) for (i = 1; i < matrix.rows + 1; i++) { // Check num of elements in row i int elements = matrix.ia[i] - matrix.ia[i-1]; if (elements == 0) { continue; } int pos = matrix.ia[i-1]; // Starting index for ja/nnz while (pos - matrix.ia[i-1] <= elements) { if (matrix.ja[pos] == i - 1) { trace += matrix.nnz.f[pos]; break; } else if (matrix.ja[pos] > i - 1) { break; } pos++; } } return trace; } struct COO matrix_addition(struct CSR matrix, struct CSR matrix2) { int totalcount = 0; int i; // Removing loop carried dependancies struct COO *result_local = allocate(matrix.rows * sizeof(struct COO)); for (i = 0; i < matrix.rows; i++) { result_local[i].count = 0; result_local[i].elements = allocate((matrix.count + matrix2.count) * sizeof(struct ELEMENT)); } #pragma omp parallel for reduction(+:totalcount) shared(matrix,matrix2) num_threads(param.threads) for (i = 0; i < matrix.rows; i++) { int elements = matrix.ia[i+1] - matrix.ia[i]; int elements2 = matrix2.ia[i+1] - matrix2.ia[i]; int m1seen = 0; int m2seen = 0; int pos = matrix.ia[i]; int pos2 = matrix2.ia[i]; while (m1seen != elements || m2seen != elements2) { if (m1seen == elements) { // Seen all matrix 1 elements while (m2seen < elements2) { result_local[i].elements[result_local[i].count].y = matrix2.ja[pos2]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.i = matrix2.nnz.i[pos2++]; m2seen++; } break; } else if (m2seen == elements2) { // Matrix2 no more elements while (m1seen < elements) { result_local[i].elements[result_local[i].count].y = matrix.ja[pos]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.i = matrix.nnz.i[pos++]; m1seen++; } break; } else { // Both matrices have elements if (matrix.ja[pos] < matrix2.ja[pos2]) { // Matrix1 lower ja result_local[i].elements[result_local[i].count].y = matrix.ja[pos]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.i = matrix.nnz.i[pos++]; m1seen++; } else if (matrix2.ja[pos2] < matrix.ja[pos]) { // Matrix2 lower ja result_local[i].elements[result_local[i].count].y = matrix2.ja[pos2]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.i = matrix2.nnz.i[pos2++]; m2seen++; } else { // Perform addition on same ja 
result_local[i].elements[result_local[i].count].y = matrix2.ja[pos2]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.i = matrix.nnz.i[pos++] + matrix2.nnz.i[pos2++]; m1seen++; m2seen++; } } } totalcount += result_local[i].count; } struct COO result; result.rows = matrix.rows; result.cols = matrix.cols; result.type = matrix.type; result.elements = allocate(totalcount * sizeof(struct ELEMENT)); result.count = 0; // Put the local copies back together for (i = 0; i < matrix.rows; i++) { if (result_local[i].count != 0) { memcpy(&result.elements[result.count], result_local[i].elements, result_local[i].count * sizeof(struct ELEMENT)); result.count += result_local[i].count; } free(result_local[i].elements); } free(result_local); return result; } struct COO matrix_addition_f(struct CSR matrix, struct CSR matrix2) { int totalcount = 0; int i; // Removing loop carried dependancies struct COO *result_local = allocate(matrix.rows * sizeof(struct COO)); for (i = 0; i < matrix.rows; i++) { result_local[i].count = 0; result_local[i].elements = allocate((matrix.count + matrix2.count) * sizeof(struct ELEMENT)); } #pragma omp parallel for reduction(+:totalcount) shared(matrix,matrix2) num_threads(param.threads) for (i = 0; i < matrix.rows; i++) { int elements = matrix.ia[i+1] - matrix.ia[i]; int elements2 = matrix2.ia[i+1] - matrix2.ia[i]; int m1seen = 0; int m2seen = 0; int pos = matrix.ia[i]; int pos2 = matrix2.ia[i]; while (m1seen != elements || m2seen != elements2) { if (m1seen == elements) { // Seen all matrix 1 elements while (m2seen < elements2) { result_local[i].elements[result_local[i].count].y = matrix2.ja[pos2]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.f = matrix2.nnz.f[pos2++]; m2seen++; } break; } else if (m2seen == elements2) { // Matrix2 no more elements while (m1seen < elements) { result_local[i].elements[result_local[i].count].y = matrix.ja[pos]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.f = matrix.nnz.f[pos++]; m1seen++; } break; } else { // Both matrices have elements if (matrix.ja[pos] < matrix2.ja[pos2]) { // Matrix1 lower ja result_local[i].elements[result_local[i].count].y = matrix.ja[pos]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.f = matrix.nnz.f[pos++]; m1seen++; } else if (matrix2.ja[pos2] < matrix.ja[pos]) { // Matrix2 lower ja result_local[i].elements[result_local[i].count].y = matrix2.ja[pos2]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.f = matrix2.nnz.f[pos2++]; m2seen++; } else { // Perform addition on same ja result_local[i].elements[result_local[i].count].y = matrix2.ja[pos2]; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].value.f = matrix.nnz.f[pos++] + matrix2.nnz.f[pos2++]; m1seen++; m2seen++; } } } totalcount += result_local[i].count; } struct COO result; result.rows = matrix.rows; result.cols = matrix.cols; result.type = matrix.type; result.elements = allocate(totalcount * sizeof(struct ELEMENT)); result.count = 0; // Put the local copies back together for (i = 0; i < matrix.rows; i++) { if (result_local[i].count != 0) { memcpy(&result.elements[result.count], result_local[i].elements, result_local[i].count * sizeof(struct ELEMENT)); result.count += result_local[i].count; } 
free(result_local[i].elements); } free(result_local); return result; } struct CSR transpose(struct CSC matrix) { struct CSR result; int i; result.rows = matrix.cols; result.cols = matrix.rows; result.ia = matrix.ia; result.ja = matrix.ja; result.type = matrix.type; if (matrix.type == TYPE_INT) { result.nnz.i = allocate(matrix.count * sizeof(int)); #pragma omp parallel for shared(result,matrix) num_threads(param.threads) for (i = 0; i < matrix.count; i++) { result.nnz.i[i] = matrix.nnz.i[i]; } } else { result.nnz.f = allocate(matrix.count * sizeof(double)); #pragma omp parallel for shared(result,matrix) num_threads(param.threads) for (i = 0; i < matrix.count; i++) { result.nnz.f[i] = matrix.nnz.f[i]; } } return result; } struct COO matrix_multiply(struct CSR matrix, struct CSC matrix2) { int totalcount = 0; int i; // Removing loop carried dependancies struct COO *result_local = allocate(matrix.rows * sizeof(struct COO)); for (i = 0; i < matrix.rows; i++) { result_local[i].count = 0; result_local[i].elements = allocate((matrix2.cols) * sizeof(struct ELEMENT)); } #pragma omp parallel for reduction(+:totalcount) shared(matrix,matrix2) num_threads(param.threads) for (i = 0; i < matrix.rows; i++) { int m1count = matrix.ia[i+1] - matrix.ia[i]; for (int j = 0; j < matrix2.cols; j++) { int dp = 0; // Final dot product int m1pos = matrix.ia[i]; int m1seen = 0; int m2count = matrix2.ia[j+1] - matrix2.ia[j]; int m2pos = matrix2.ia[j]; int m2seen = 0; while (m1seen != m1count && m2seen != m2count) { if (matrix.ja[m1pos] == matrix2.ja[m2pos]) { // Row and col index match dp += (matrix.nnz.i[m1pos++] * matrix2.nnz.i[m2pos++]); m1seen++; m2seen++; } else if (matrix.ja[m1pos] < matrix2.ja[m2pos]) { m1seen++; m1pos++; } else { m2seen++; m2pos++; } } if (dp != 0) { result_local[i].elements[result_local[i].count].value.i = dp; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].y = j; } } totalcount += result_local[i].count; } struct COO result; result.count = 0; result.type = matrix.type; result.elements = allocate(totalcount * sizeof(struct ELEMENT)); result.rows = matrix.rows; result.cols = matrix2.cols; // Put the local copies back together for (i = 0; i < matrix.rows; i++) { if (result_local[i].count != 0) { memcpy(&result.elements[result.count], result_local[i].elements, result_local[i].count * sizeof(struct ELEMENT)); result.count += result_local[i].count; } free(result_local[i].elements); } free(result_local); return result; } struct COO matrix_multiply_f(struct CSR matrix, struct CSC matrix2) { int totalcount = 0; int i; // Removing loop carried dependancies struct COO *result_local = allocate(matrix.rows * sizeof(struct COO)); for (i = 0; i < matrix.rows; i++) { result_local[i].count = 0; result_local[i].elements = allocate((matrix2.cols) * sizeof(struct ELEMENT)); } #pragma omp parallel for reduction(+:totalcount) shared(matrix,matrix2) num_threads(param.threads) for (i = 0; i < matrix.rows; i++) { int m1count = matrix.ia[i+1] - matrix.ia[i]; for (int j = 0; j < matrix2.cols; j++) { double dp = 0; // Final dot product int m1pos = matrix.ia[i]; int m1seen = 0; int m2count = matrix2.ia[j+1] - matrix2.ia[j]; int m2pos = matrix2.ia[j]; int m2seen = 0; while (m1seen != m1count && m2seen != m2count) { if (matrix.ja[m1pos] == matrix2.ja[m2pos]) { // Row and col index match dp += (matrix.nnz.f[m1pos++] * matrix2.nnz.f[m2pos++]); m1seen++; m2seen++; } else if (matrix.ja[m1pos] < matrix2.ja[m2pos]) { m1seen++; m1pos++; } else { m2seen++; m2pos++; } } if 
(dp != 0) { result_local[i].elements[result_local[i].count].value.f = dp; result_local[i].elements[result_local[i].count].x = i; result_local[i].elements[result_local[i].count++].y = j; } } totalcount += result_local[i].count; } struct COO result; result.count = 0; result.type = matrix.type; result.elements = allocate(totalcount * sizeof(struct ELEMENT)); result.rows = matrix.rows; result.cols = matrix2.cols; // Put the local copies back together for (i = 0; i < matrix.rows; i++) { if (result_local[i].count != 0) { memcpy(&result.elements[result.count], result_local[i].elements, result_local[i].count * sizeof(struct ELEMENT)); result.count += result_local[i].count; } free(result_local[i].elements); } free(result_local); return result; }
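/*
 * Clarifying sketch (not part of the library): the inner loop of
 * matrix_multiply / matrix_multiply_f above is a two-pointer intersection of
 * one CSR row and one CSC column, both stored as sorted index runs.
 * sparse_dot_ref is a hypothetical name; ja1/v1 hold the row's column
 * indices and values, ja2/v2 the column's row indices and values.
 */
static int sparse_dot_ref(const int *ja1, const int *v1, int n1,
                          const int *ja2, const int *v2, int n2)
{
    int i = 0, j = 0, dp = 0;

    while (i < n1 && j < n2) {
        if (ja1[i] == ja2[j])
            dp += v1[i++] * v2[j++]; /* indices match: accumulate product */
        else if (ja1[i] < ja2[j])
            i++;                     /* advance the row run */
        else
            j++;                     /* advance the column run */
    }

    return dp;
}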
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ #if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT #include "MagickCore/threshold-map.h" #else static const char *const BuiltinMap= "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; #endif /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if ((width == 0) || (height == 0)) return(threshold_image); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. 
*/ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; const Quantum *magick_restrict p, *magick_restrict pixels; Quantum *magick_restrict q; ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically performs image thresholding depending
%  on which method you specify.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
*/ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Normalize histogram. */ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(histogram); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. */ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property,exception); if (IsStringTrue(GetImageArtifact(image,"auto-threshold:verbose")) != MagickFalse) (void) FormatLocaleFile(stdout,"%.*g%%\n",GetMagickPrecision(),threshold); return(BilevelImage(image,QuantumRange*threshold/100.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. 
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that given value is set to its maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as this operator by using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535); otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampPixel((MagickRealType) q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClampImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorThresholdImage() forces all pixels in the color range to white % otherwise black. % % The format of the ColorThresholdImage method is: % % MagickBooleanType ColorThresholdImage(Image *image, % const PixelInfo *start_color,const PixelInfo *stop_color, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o start_color, stop_color: define the start and stop color range. Any % pixel within the range returns white otherwise black. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorThresholdImage(Image *image, const PixelInfo *start_color,const PixelInfo *stop_color, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; const char *artifact; IlluminantType illuminant = D65Illuminant; MagickBooleanType status; MagickOffsetType progress; PixelInfo start, stop; ssize_t y; /* Color threshold image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=AcquireImageColormap(image,2,exception); if (status == MagickFalse) return(status); artifact=GetImageArtifact(image,"color:illuminant"); if (artifact != (const char *) NULL) { illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions, MagickFalse,artifact); if ((ssize_t) illuminant < 0) illuminant=UndefinedIlluminant; } start=(*start_color); stop=(*stop_color); switch (image->colorspace) { case HCLColorspace: { ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSBColorspace: { ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSLColorspace: { ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSVColorspace: { ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HWBColorspace: { ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case LabColorspace: { ConvertRGBToLab(start_color->red,start_color->green,start_color->blue, illuminant,&start.red,&start.green,&start.blue); ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue, illuminant,&stop.red,&stop.green,&stop.blue); break; } default: { start.red*=QuantumScale; start.green*=QuantumScale; start.blue*=QuantumScale; stop.red*=QuantumScale; stop.green*=QuantumScale; stop.blue*=QuantumScale; break; } } start.red*=QuantumRange; start.green*=QuantumRange; start.blue*=QuantumRange; stop.red*=QuantumRange; stop.green*=QuantumRange; stop.blue*=QuantumRange; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickBooleanType foreground = MagickTrue; ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((q[i] < GetPixelInfoChannel(&start,channel)) || (q[i] > GetPixelInfoChannel(&stop,channel))) foreground=MagickFalse; } SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 
1 : 0),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->colorspace=sRGBColorspace;
  return(SyncImage(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocates the given ThresholdMap.
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the ThresholdMap to destroy.
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for the
%  map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    options=GetConfigureOptions(ThresholdsFilename,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    while (option != (const StringInfo *) NULL)
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
      option=(const StringInfo *) GetNextValueInLinkedList(options);
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() looks for a given threshold map name or alias in the
%  given XML file data, and returns the allocated map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
% % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. % */ static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename, const char *map_id,ExceptionInfo *exception) { char *p; const char *attribute, *content; double value; ssize_t i; ThresholdMap *map; XMLTreeInfo *description, *levels, *threshold, *thresholds; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); map=(ThresholdMap *) NULL; thresholds=NewXMLTree(xml,exception); if (thresholds == (XMLTreeInfo *) NULL) return(map); for (threshold=GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { attribute=GetXMLTreeAttribute(threshold,"map"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; attribute=GetXMLTreeAttribute(threshold,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; } if (threshold == (XMLTreeInfo *) NULL) { thresholds=DestroyXMLTree(thresholds); return(map); } description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); return(map); } levels=GetXMLTreeChild(threshold,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map)); map->map_id=(char *) NULL; map->description=(char *) NULL; map->levels=(ssize_t *) NULL; attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if (map->divisor < 
2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMapFile method is:
%
%      MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
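%
%  A minimal usage sketch (illustrative only; `exception` is assumed to be an
%  initialized ExceptionInfo, and BuiltinMap is only defined when zero
%  configuration support is disabled):
%
%      (void) ListThresholdMapFile(stdout,BuiltinMap,"built-in",exception);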
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "thresholds.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ?
MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() performs an ordered dither based on a number of
%  pre-defined dithering threshold maps, but over multiple intensity levels,
%  which can be different for different channels, according to the input
%  argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While "checks,8,8,4" will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with a
%      bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i,
    y;

  ThresholdMap
    *map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
         (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
         (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      (void) GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  for (i=0; i <
MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j,
        n;

      n=0;
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;
            continue;
          }
        threshold=(ssize_t) (QuantumScale*q[j]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[j]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P e r c e p t i b l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PerceptibleImage() sets each pixel whose value is less than |epsilon| to
%  epsilon or -epsilon (whichever is closer); otherwise the pixel value
%  remains unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ?
-1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R a n d o m T h r e s h o l d I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o min_threshold,max_threshold: Specify the minimum and maximum
%      thresholds.  These values range from 0 to QuantumRange.
%
%    o exception: return any errors or warnings in this structure.
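%
%  A minimal usage sketch (illustrative threshold values only): channel
%  values below min_threshold always become black, values above max_threshold
%  always become white, and only values in between are compared against a
%  pseudo-random threshold:
%
%      (void) RandomThresholdImage(image,0.1*QuantumRange,0.9*QuantumRange,
%        exception);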
% */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Random threshold image. */ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. 
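%
%  A minimal usage sketch (illustrative threshold values only): the mapping
%  is a trapezoid that ramps from black up to white between low_black and
%  low_white, holds white through high_white, and ramps back down to black
%  at high_black:
%
%      (void) RangeThresholdImage(image,0.2*QuantumRange,0.4*QuantumRange,
%        0.6*QuantumRange,0.8*QuantumRange,exception);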
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=(Quantum) 0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=(Quantum) 0; else q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
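%
%  A minimal usage sketch (illustrative threshold only): force every channel
%  value above 60% of QuantumRange to white, leaving darker values unchanged:
%
%      (void) WhiteThresholdImage(image,"60%",exception);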
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
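
/*
  A minimal usage sketch of the public API above (hypothetical driver code,
  not part of this file; assumes MagickCoreGenesis() has been called and
  `image` was obtained from ReadImage()):

    ExceptionInfo
      *exception;

    exception=AcquireExceptionInfo();
    // Either pick a global threshold automatically and bilevel the image,
    (void) AutoThresholdImage(image,OTSUThresholdMethod,exception);
    // or posterize to 6 levels with an ordered 3x3 dither (see the
    // OrderedDitherImage() documentation above for the argument syntax).
    (void) OrderedDitherImage(image,"o3x3,6",exception);
    exception=DestroyExceptionInfo(exception);
*/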
nstream-usm-target.c
///
/// Copyright (c) 2019, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
///       notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
///       copyright notice, this list of conditions and the following
///       disclaimer in the documentation and/or other materials provided
///       with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
///       contributors may be used to endorse or promote products
///       derived from this software without specific prior written
///       permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME:    nstream
///
/// PURPOSE: To compute memory bandwidth when adding a vector of a given
///          number of double precision values to the scalar multiple of
///          another vector of the same length, and storing the result in
///          a third vector.
///
/// USAGE:   The program takes as input the number
///          of iterations to loop over the triad vectors and
///          the length of the vectors.
///
///          <progname> <# iterations> <vector length>
///
///          The output consists of diagnostics to make sure the
///          algorithm worked, and of timing statistics.
///
/// NOTES:   Bandwidth is determined as the number of words read, plus the
///          number of words written, times the size of the words, divided
///          by the execution time. For a vector length of N, the total
///          number of words read and written is 4*N*sizeof(double).
///
///
/// HISTORY: This code is loosely based on the Stream benchmark by John
///          McCalpin, but does not follow all the Stream rules. Hence,
///          reported results should not be associated with Stream in
///          external publications
///
/// Converted to C++11 by Jeff Hammond, November 2017.
/// Converted to C11 by Jeff Hammond, February 2019.
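///
/// Example: with a vector length of N = 100,000,000 each pass of the
/// triad kernel moves 4*100000000*sizeof(double) = 3.2e9 bytes, so an
/// average time of 0.040 s per iteration is reported below as
/// 1.e-6*3.2e9/0.040 = 80,000 MB/s.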
///
//////////////////////////////////////////////////////////////////////

#pragma omp requires unified_shared_memory

#include "prk_util.h"
#include "prk_openmp.h"

int main(int argc, char * argv[])
{
  printf("Parallel Research Kernels version %d\n", PRKVERSION );
  printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n");

  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////

  if (argc < 3) {
    printf("Usage: <# iterations> <vector length>\n");
    return 1;
  }

  int iterations = atoi(argv[1]);
  if (iterations < 1) {
    printf("ERROR: iterations must be >= 1\n");
    return 1;
  }

  // length of the vector
  size_t length = atol(argv[2]);
  if (length <= 0) {
    printf("ERROR: Vector length must be greater than 0\n");
    return 1;
  }

  int device = (argc > 3) ? atol(argv[3]) : omp_get_initial_device();
  if ( (device < 0 || omp_get_num_devices() <= device ) &&
       (device != omp_get_initial_device()) ) {
    printf("ERROR: device number %d is not valid.\n", device);
    return 1;
  }

  printf("Number of iterations = %d\n", iterations);
  printf("Vector length        = %zu\n", length);
  printf("OpenMP Device        = %d\n", device);

  //////////////////////////////////////////////////////////////////////
  // Allocate space and perform the computation
  //////////////////////////////////////////////////////////////////////

  double nstream_time = 0.0;

  int host = omp_get_initial_device();
  size_t bytes = length*sizeof(double);
  double * restrict A = omp_target_alloc(bytes, host);
  double * restrict B = omp_target_alloc(bytes, host);
  double * restrict C = omp_target_alloc(bytes, host);

  double scalar = 3.0;

  #pragma omp parallel for simd schedule(static)
  for (size_t i=0; i<length; i++) {
    A[i] = 0.0;
    B[i] = 2.0;
    C[i] = 2.0;
  }

  {
    for (int iter = 0; iter<=iterations; iter++) {
      if (iter==1) nstream_time = omp_get_wtime();
      #pragma omp target teams distribute parallel for simd schedule(static) device(device)
      for (size_t i=0; i<length; i++) {
        A[i] += B[i] + scalar * C[i];
      }
    }
    nstream_time = omp_get_wtime() - nstream_time;
  }

  omp_target_free(C, host);
  omp_target_free(B, host);

  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////

  double ar = 0.0;
  double br = 2.0;
  double cr = 2.0;
  for (int i=0; i<=iterations; i++) {
    ar += br + scalar * cr;
  }
  ar *= length;

  double asum = 0.0;
  #pragma omp parallel for reduction(+:asum)
  for (size_t i=0; i<length; i++) {
    asum += fabs(A[i]);
  }

  omp_target_free(A, host);

  double epsilon=1.e-8;
  if (fabs(ar-asum)/asum > epsilon) {
    printf("Failed Validation on output array\n"
           "       Expected checksum: %lf\n"
           "       Observed checksum: %lf\n"
           "ERROR: solution did not validate\n", ar, asum);
    return 1;
  } else {
    printf("Solution validates\n");
    double avgtime = nstream_time/iterations;
    double nbytes = 4.0 * length * sizeof(double);
    printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime);
  }

  return 0;
}
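/*
 * Validation sketch (illustrative, not in the original source): B[i] and
 * C[i] stay at 2.0 and scalar is 3.0, so each of the iterations+1 kernel
 * launches (the iter == 0 pass is the untimed warm-up) adds 2 + 3*2 = 8
 * to every A[i].  The reference loop above is therefore equivalent to
 * the closed form below.
 */
static double reference_checksum(int iterations, size_t length)
{
  /* 8.0 == br + scalar * cr with br = cr = 2.0 and scalar = 3.0 */
  return 8.0 * (double)(iterations + 1) * (double)length;
}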
perftest.c
/** * Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED. * Copyright (C) The University of Tennessee and The University * of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED. * Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED. * * See file LICENSE for terms. */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "api/libperf.h" #include "lib/libperf_int.h" #include <ucs/sys/string.h> #include <ucs/sys/sys.h> #include <ucs/sys/sock.h> #include <ucs/debug/log.h> #include <sys/socket.h> #include <arpa/inet.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <netdb.h> #include <getopt.h> #include <string.h> #include <sys/types.h> #include <sys/poll.h> #include <locale.h> #if defined (HAVE_MPI) # include <mpi.h> #elif defined (HAVE_RTE) # include<rte.h> #endif #define MAX_BATCH_FILES 32 #define MAX_CPUS 1024 #define TL_RESOURCE_NAME_NONE "<none>" #define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCIqM:r:T:d:x:A:BUm:" #define TEST_ID_UNDEFINED -1 enum { TEST_FLAG_PRINT_RESULTS = UCS_BIT(0), TEST_FLAG_PRINT_TEST = UCS_BIT(1), TEST_FLAG_SET_AFFINITY = UCS_BIT(8), TEST_FLAG_NUMERIC_FMT = UCS_BIT(9), TEST_FLAG_PRINT_FINAL = UCS_BIT(10), TEST_FLAG_PRINT_CSV = UCS_BIT(11) }; typedef struct sock_rte_group { int is_server; int connfd; } sock_rte_group_t; typedef struct test_type { const char *name; ucx_perf_api_t api; ucx_perf_cmd_t command; ucx_perf_test_type_t test_type; const char *desc; const char *overhead_lat; unsigned window_size; } test_type_t; typedef struct perftest_params { ucx_perf_params_t super; int test_id; } perftest_params_t; struct perftest_context { perftest_params_t params; const char *server_addr; int port; int mpi; unsigned num_cpus; unsigned cpus[MAX_CPUS]; unsigned flags; unsigned num_batch_files; char *batch_files[MAX_BATCH_FILES]; char *test_names[MAX_BATCH_FILES]; sock_rte_group_t sock_rte_group; }; test_type_t tests[] = { {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG, "active message latency", "latency", 1}, {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG, "atomic add latency", "latency", 1}, {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / rate", "latency", 1}, {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / rate", "latency", 1}, {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / rate", "latency", 1}, {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI, "active message bandwidth / message rate", "overhead", 1}, {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth / message rate", "overhead", 1}, {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add message rate", "overhead", 1}, {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG, "tag match latency", "latency", 1}, {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag match bandwidth", "overhead", 32}, {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG, "tag sync match latency", "latency", 1}, {"tag_sync_bw", 
UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI, "tag sync match bandwidth", "overhead", 32}, {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG, "put latency", "latency", 1}, {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI, "put bandwidth", "overhead", 32}, {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI, "get latency / bandwidth / message rate", "latency", 1}, {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic add bandwidth / message rate", "overhead", 1}, {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic fetch-and-add latency / bandwidth / rate", "latency", 1}, {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic swap latency / bandwidth / rate", "latency", 1}, {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI, "atomic compare-and-swap latency / bandwidth / rate", "latency", 1}, {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI, "stream bandwidth", "overhead", 1}, {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG, "stream latency", "latency", 1}, {NULL} }; static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int), int poll_events, void *data, size_t size, void (*progress)(void *arg), void *arg, const char *name) { size_t total = 0; struct pollfd pfd; int ret; while (total < size) { pfd.fd = sock; pfd.events = poll_events; pfd.revents = 0; ret = poll(&pfd, 1, 1); /* poll for 1ms */ if (ret > 0) { ucs_assert(ret == 1); ucs_assert(pfd.revents & poll_events); ret = sock_call(sock, (char*)data + total, size - total, 0); if (ret < 0) { ucs_error("%s() failed: %m", name); return -1; } total += ret; } else if ((ret < 0) && (errno != EINTR)) { ucs_error("poll(fd=%d) failed: %m", sock); return -1; } /* progress user context */ if (progress != NULL) { progress(arg); } } return 0; } static int safe_send(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { typedef ssize_t (*sock_call)(int, void *, size_t, int); return sock_io(sock, (sock_call)send, POLLOUT, data, size, progress, arg, "send"); } static int safe_recv(int sock, void *data, size_t size, void (*progress)(void *arg), void *arg) { return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv"); } static void print_progress(char **test_names, unsigned num_names, const ucx_perf_result_t *result, unsigned flags, int final, int is_server, int is_multi_thread) { static const char *fmt_csv; static const char *fmt_numeric; static const char *fmt_plain; unsigned i; if (!(flags & TEST_FLAG_PRINT_RESULTS) || (!final && (flags & TEST_FLAG_PRINT_FINAL))) { return; } if (flags & TEST_FLAG_PRINT_CSV) { for (i = 0; i < num_names; ++i) { printf("%s,", test_names[i]); } } #if _OPENMP if (!final) { printf("[thread %d]", omp_get_thread_num()); } else if (flags & TEST_FLAG_PRINT_RESULTS) { printf("Final: "); } #endif if (is_multi_thread && final) { fmt_csv = "%4.0f,%.3f,%.2f,%.0f\n"; fmt_numeric = "%'18.0f %29.3f %22.2f %'24.0f\n"; fmt_plain = "%18.0f %29.3f %22.2f %23.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? 
fmt_numeric : fmt_plain, (double)result->iters, result->latency.total_average * 1000000.0, result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.total_average); } else { fmt_csv = "%4.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n"; fmt_numeric = "%'18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %'11.0f %'11.0f\n"; fmt_plain = "%18.0f %9.3f %9.3f %9.3f %11.2f %10.2f %11.0f %11.0f\n"; printf((flags & TEST_FLAG_PRINT_CSV) ? fmt_csv : (flags & TEST_FLAG_NUMERIC_FMT) ? fmt_numeric : fmt_plain, (double)result->iters, result->latency.typical * 1000000.0, result->latency.moment_average * 1000000.0, result->latency.total_average * 1000000.0, result->bandwidth.moment_average / (1024.0 * 1024.0), result->bandwidth.total_average / (1024.0 * 1024.0), result->msgrate.moment_average, result->msgrate.total_average); } fflush(stdout); } static void print_header(struct perftest_context *ctx) { const char *overhead_lat_str; const char *test_data_str; const char *test_api_str; test_type_t *test; unsigned i; test = (ctx->params.test_id == TEST_ID_UNDEFINED) ? NULL : &tests[ctx->params.test_id]; if ((ctx->flags & TEST_FLAG_PRINT_TEST) && (test != NULL)) { if (test->api == UCX_PERF_API_UCT) { test_api_str = "transport layer"; switch (ctx->params.super.uct.data_layout) { case UCT_PERF_DATA_LAYOUT_SHORT: test_data_str = "short"; break; case UCT_PERF_DATA_LAYOUT_SHORT_IOV: test_data_str = "short iov"; break; case UCT_PERF_DATA_LAYOUT_BCOPY: test_data_str = "bcopy"; break; case UCT_PERF_DATA_LAYOUT_ZCOPY: test_data_str = "zcopy"; break; default: test_data_str = "(undefined)"; break; } } else if (test->api == UCX_PERF_API_UCP) { test_api_str = "protocol layer"; test_data_str = "(automatic)"; /* TODO contig/stride/stream */ } else { return; } printf("+------------------------------------------------------------------------------------------+\n"); printf("| API: %-60s |\n", test_api_str); printf("| Test: %-60s |\n", test->desc); printf("| Data layout: %-60s |\n", test_data_str); printf("| Send memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.send_mem_type]); printf("| Recv memory: %-60s |\n", ucs_memory_type_names[ctx->params.super.recv_mem_type]); printf("| Message size: %-60zu |\n", ucx_perf_get_message_size(&ctx->params.super)); } if (ctx->flags & TEST_FLAG_PRINT_CSV) { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { for (i = 0; i < ctx->num_batch_files; ++i) { printf("%s,", ucs_basename(ctx->batch_files[i])); } printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n"); } } else { if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { overhead_lat_str = (test == NULL) ? 
"overhead" : test->overhead_lat; printf("+--------------+--------------+-----------------------------+---------------------+-----------------------+\n"); printf("| | | %8s (usec) | bandwidth (MB/s) | message rate (msg/s) |\n", overhead_lat_str); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); printf("| Stage | # iterations | typical | average | overall | average | overall | average | overall |\n"); printf("+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n"); } else if (ctx->flags & TEST_FLAG_PRINT_TEST) { printf("+------------------------------------------------------------------------------------------+\n"); } } } static void print_test_name(struct perftest_context *ctx) { char buf[200]; unsigned i, pos; if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) { strcpy(buf, "+--------------+--------------+---------+---------+---------+----------+----------+-----------+-----------+"); pos = 1; for (i = 0; i < ctx->num_batch_files; ++i) { if (i != 0) { buf[pos++] = '/'; } memcpy(&buf[pos], ctx->test_names[i], ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - pos - 1)); pos += strlen(ctx->test_names[i]); } if (ctx->flags & TEST_FLAG_PRINT_RESULTS) { printf("%s\n", buf); } } } static void print_memory_type_usage(void) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if (ucx_perf_mem_type_allocators[it] != NULL) { printf(" %s - %s\n", ucs_memory_type_names[it], ucs_memory_type_descs[it]); } } } static void usage(const struct perftest_context *ctx, const char *program) { static const char* api_names[] = { [UCX_PERF_API_UCT] = "UCT", [UCX_PERF_API_UCP] = "UCP" }; test_type_t *test; int UCS_V_UNUSED rank; #ifdef HAVE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (ctx->mpi && (rank != 0)) { return; } #endif #if defined (HAVE_MPI) printf(" Note: test can be also launched as an MPI application\n"); printf("\n"); #elif defined (HAVE_RTE) printf(" Note: this test can be also launched as an libRTE application\n"); printf("\n"); #endif printf(" Usage: %s [ server-hostname ] [ options ]\n", program); printf("\n"); printf(" Common options:\n"); printf(" -t <test> test to run:\n"); for (test = tests; test->name; ++test) { printf(" %13s - %s %s\n", test->name, api_names[test->api], test->desc); } printf("\n"); printf(" -s <size> list of scatter-gather sizes for single message (%zu)\n", ctx->params.super.msg_size_list[0]); printf(" for example: \"-s 16,48,8192,8192,14\"\n"); printf(" -m <send mem type>[,<recv mem type>]\n"); printf(" memory type of message for sender and receiver (host)\n"); print_memory_type_usage(); printf(" -n <iters> number of iterations to run (%"PRIu64")\n", ctx->params.super.max_iter); printf(" -w <iters> number of warm-up iterations (%"PRIu64")\n", ctx->params.super.warmup_iter); printf(" -c <cpulist> set affinity to this CPU list (separated by comma) (off)\n"); printf(" -O <count> maximal number of uncompleted outstanding sends\n"); printf(" -i <offset> distance between consecutive scatter-gather entries (%zu)\n", ctx->params.super.iov_stride); printf(" -T <threads> number of threads in the test (%d)\n", ctx->params.super.thread_count); printf(" -o do not progress the responder in one-sided tests\n"); printf(" -B register memory with NONBLOCK flag\n"); printf(" -b <file> read and execute tests from a batch file: every line in the\n"); printf(" file is a test to run, first word is test name, the 
rest of\n"); printf(" the line is command-line arguments for the test.\n"); printf(" -p <port> TCP port to use for data exchange (%d)\n", ctx->port); #ifdef HAVE_MPI printf(" -P <0|1> disable/enable MPI mode (%d)\n", ctx->mpi); #endif printf(" -h show this help message\n"); printf("\n"); printf(" Output format:\n"); printf(" -N use numeric formatting (thousands separator)\n"); printf(" -f print only final numbers\n"); printf(" -v print CSV-formatted output\n"); printf("\n"); printf(" UCT only:\n"); printf(" -d <device> device to use for testing\n"); printf(" -x <tl> transport to use for testing\n"); printf(" -D <layout> data layout for sender side:\n"); printf(" short - short messages (default, cannot be used for get)\n"); printf(" shortiov - short io-vector messages (only for active messages)\n"); printf(" bcopy - copy-out (cannot be used for atomics)\n"); printf(" zcopy - zero-copy (cannot be used for atomics)\n"); printf(" iov - scatter-gather list (iovec)\n"); printf(" -W <count> flow control window size, for active messages (%u)\n", ctx->params.super.uct.fc_window); printf(" -H <size> active message header size (%zu)\n", ctx->params.super.am_hdr_size); printf(" -A <mode> asynchronous progress mode (thread_spinlock)\n"); printf(" thread_spinlock - separate progress thread with spin locking\n"); printf(" thread_mutex - separate progress thread with mutex locking\n"); printf(" signal - signal-based timer\n"); printf("\n"); printf(" UCP only:\n"); printf(" -M <thread> thread support level for progress engine (single)\n"); printf(" single - only the master thread can access\n"); printf(" serialized - one thread can access at a time\n"); printf(" multi - multiple threads can access\n"); printf(" -D <layout>[,<layout>]\n"); printf(" data layout for sender and receiver side (contig)\n"); printf(" contig - Continuous datatype\n"); printf(" iov - Scatter-gather list\n"); printf(" -C use wild-card tag for tag tests\n"); printf(" -U force unexpected flow by using tag probe\n"); printf(" -r <mode> receive mode for stream tests (recv)\n"); printf(" recv : Use ucp_stream_recv_nb\n"); printf(" recv_data : Use ucp_stream_recv_data_nb\n"); printf(" -I create context with wakeup feature enabled\n"); printf("\n"); printf(" NOTE: When running UCP tests, transport and device should be specified by\n"); printf(" environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n"); printf("\n"); } static ucs_status_t parse_ucp_datatype_params(const char *opt_arg, ucp_perf_datatype_t *datatype) { const char *iov_type = "iov"; const size_t iov_type_size = strlen("iov"); const char *contig_type = "contig"; const size_t contig_type_size = strlen("contig"); if (0 == strncmp(opt_arg, iov_type, iov_type_size)) { *datatype = UCP_PERF_DATATYPE_IOV; } else if (0 == strncmp(opt_arg, contig_type, contig_type_size)) { *datatype = UCP_PERF_DATATYPE_CONTIG; } else { return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_mem_type(const char *opt_arg, ucs_memory_type_t *mem_type) { ucs_memory_type_t it; for (it = UCS_MEMORY_TYPE_HOST; it < UCS_MEMORY_TYPE_LAST; it++) { if(!strcmp(opt_arg, ucs_memory_type_names[it]) && (ucx_perf_mem_type_allocators[it] != NULL)) { *mem_type = it; return UCS_OK; } } ucs_error("Unsupported memory type: \"%s\"", opt_arg); return UCS_ERR_INVALID_PARAM; } static ucs_status_t parse_mem_type_params(const char *opt_arg, ucs_memory_type_t *send_mem_type, ucs_memory_type_t *recv_mem_type) { const char *delim = ","; char *token = strtok((char*)opt_arg, delim); if (UCS_OK != 
parse_mem_type(token, send_mem_type)) { return UCS_ERR_INVALID_PARAM; } token = strtok(NULL, delim); if (NULL == token) { *recv_mem_type = *send_mem_type; return UCS_OK; } else { return parse_mem_type(token, recv_mem_type); } } static ucs_status_t parse_message_sizes_params(const char *opt_arg, ucx_perf_params_t *params) { const char delim = ','; size_t *msg_size_list, token_num, token_it; char *optarg_ptr, *optarg_ptr2; optarg_ptr = (char *)opt_arg; token_num = 0; /* count the number of given message sizes */ while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) { ++optarg_ptr; ++token_num; } ++token_num; msg_size_list = realloc(params->msg_size_list, sizeof(*params->msg_size_list) * token_num); if (NULL == msg_size_list) { return UCS_ERR_NO_MEMORY; } params->msg_size_list = msg_size_list; optarg_ptr = (char *)opt_arg; errno = 0; for (token_it = 0; token_it < token_num; ++token_it) { params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10); if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) || ((errno != 0) && (params->msg_size_list[token_it] == 0)) || (optarg_ptr == optarg_ptr2)) { free(params->msg_size_list); params->msg_size_list = NULL; /* prevent double free */ ucs_error("Invalid option substring argument at position %lu", token_it); return UCS_ERR_INVALID_PARAM; } optarg_ptr = optarg_ptr2 + 1; } params->msg_size_cnt = token_num; return UCS_OK; } static ucs_status_t init_test_params(perftest_params_t *params) { memset(params, 0, sizeof(*params)); params->super.api = UCX_PERF_API_LAST; params->super.command = UCX_PERF_CMD_LAST; params->super.test_type = UCX_PERF_TEST_TYPE_LAST; params->super.thread_mode = UCS_THREAD_MODE_SINGLE; params->super.thread_count = 1; params->super.async_mode = UCS_ASYNC_THREAD_LOCK_TYPE; params->super.wait_mode = UCX_PERF_WAIT_MODE_LAST; params->super.max_outstanding = 0; params->super.warmup_iter = 10000; params->super.am_hdr_size = 8; params->super.alignment = ucs_get_page_size(); params->super.max_iter = 1000000l; params->super.max_time = 0.0; params->super.report_interval = 1.0; params->super.flags = UCX_PERF_TEST_FLAG_VERBOSE; params->super.uct.fc_window = UCT_PERF_TEST_MAX_FC_WINDOW; params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; params->super.send_mem_type = UCS_MEMORY_TYPE_HOST; params->super.recv_mem_type = UCS_MEMORY_TYPE_HOST; params->super.msg_size_cnt = 1; params->super.iov_stride = 0; params->super.ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG; params->super.ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG; strcpy(params->super.uct.dev_name, TL_RESOURCE_NAME_NONE); strcpy(params->super.uct.tl_name, TL_RESOURCE_NAME_NONE); params->super.msg_size_list = calloc(params->super.msg_size_cnt, sizeof(*params->super.msg_size_list)); if (params->super.msg_size_list == NULL) { return UCS_ERR_NO_MEMORY; } params->super.msg_size_list[0] = 8; params->test_id = TEST_ID_UNDEFINED; return UCS_OK; } static ucs_status_t parse_test_params(perftest_params_t *params, char opt, const char *opt_arg) { char *optarg2 = NULL; test_type_t *test; unsigned i; switch (opt) { case 'd': ucs_snprintf_zero(params->super.uct.dev_name, sizeof(params->super.uct.dev_name), "%s", opt_arg); return UCS_OK; case 'x': ucs_snprintf_zero(params->super.uct.tl_name, sizeof(params->super.uct.tl_name), "%s", opt_arg); return UCS_OK; case 't': for (i = 0; tests[i].name != NULL; ++i) { test = &tests[i]; if (!strcmp(opt_arg, test->name)) { params->super.api = test->api; params->super.command = test->command; params->super.test_type = 
test->test_type; params->test_id = i; break; } } if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("Invalid option argument for -t"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'D': if (!strcmp(opt_arg, "short")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT; } else if (!strcmp(opt_arg, "shortiov")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT_IOV; } else if (!strcmp(opt_arg, "bcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_BCOPY; } else if (!strcmp(opt_arg, "zcopy")) { params->super.uct.data_layout = UCT_PERF_DATA_LAYOUT_ZCOPY; } else if (UCS_OK == parse_ucp_datatype_params(opt_arg, &params->super.ucp.send_datatype)) { optarg2 = strchr(opt_arg, ','); if (optarg2) { if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1, &params->super.ucp.recv_datatype)) { return UCS_ERR_INVALID_PARAM; } } } else { ucs_error("Invalid option argument for -D"); return UCS_ERR_INVALID_PARAM; } return UCS_OK; case 'i': params->super.iov_stride = atol(opt_arg); return UCS_OK; case 'n': params->super.max_iter = atol(opt_arg); return UCS_OK; case 's': return parse_message_sizes_params(opt_arg, &params->super); case 'H': params->super.am_hdr_size = atol(opt_arg); return UCS_OK; case 'W': params->super.uct.fc_window = atoi(opt_arg); return UCS_OK; case 'O': params->super.max_outstanding = atoi(opt_arg); return UCS_OK; case 'w': params->super.warmup_iter = atol(opt_arg); return UCS_OK; case 'o': params->super.flags |= UCX_PERF_TEST_FLAG_ONE_SIDED; return UCS_OK; case 'B': params->super.flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK; return UCS_OK; case 'q': params->super.flags &= ~UCX_PERF_TEST_FLAG_VERBOSE; return UCS_OK; case 'C': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD; return UCS_OK; case 'U': params->super.flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE; return UCS_OK; case 'I': params->super.flags |= UCX_PERF_TEST_FLAG_WAKEUP; return UCS_OK; case 'M': if (!strcmp(opt_arg, "single")) { params->super.thread_mode = UCS_THREAD_MODE_SINGLE; return UCS_OK; } else if (!strcmp(opt_arg, "serialized")) { params->super.thread_mode = UCS_THREAD_MODE_SERIALIZED; return UCS_OK; } else if (!strcmp(opt_arg, "multi")) { params->super.thread_mode = UCS_THREAD_MODE_MULTI; return UCS_OK; } else { ucs_error("Invalid option argument for -M"); return UCS_ERR_INVALID_PARAM; } case 'T': params->super.thread_count = atoi(opt_arg); return UCS_OK; case 'A': if (!strcmp(opt_arg, "thread") || !strcmp(opt_arg, "thread_spinlock")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK; return UCS_OK; } else if (!strcmp(opt_arg, "thread_mutex")) { params->super.async_mode = UCS_ASYNC_MODE_THREAD_MUTEX; return UCS_OK; } else if (!strcmp(opt_arg, "signal")) { params->super.async_mode = UCS_ASYNC_MODE_SIGNAL; return UCS_OK; } else { ucs_error("Invalid option argument for -A"); return UCS_ERR_INVALID_PARAM; } case 'r': if (!strcmp(opt_arg, "recv_data")) { params->super.flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } else if (!strcmp(opt_arg, "recv")) { params->super.flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA; return UCS_OK; } return UCS_ERR_INVALID_PARAM; case 'm': if (UCS_OK != parse_mem_type_params(opt_arg, &params->super.send_mem_type, &params->super.recv_mem_type)) { return UCS_ERR_INVALID_PARAM; } return UCS_OK; default: return UCS_ERR_INVALID_PARAM; } } static ucs_status_t adjust_test_params(perftest_params_t *params, const char *error_prefix) { test_type_t *test; if (params->test_id == TEST_ID_UNDEFINED) { ucs_error("%smissing test name", error_prefix); 
return UCS_ERR_INVALID_PARAM; } test = &tests[params->test_id]; if (params->super.max_outstanding == 0) { params->super.max_outstanding = test->window_size; } return UCS_OK; } static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name, int *line_num, perftest_params_t *params, char** test_name_p) { #define MAX_SIZE 256 #define MAX_ARG_SIZE 2048 ucs_status_t status; char buf[MAX_ARG_SIZE]; char error_prefix[MAX_ARG_SIZE]; int argc; char *argv[MAX_SIZE + 1]; int c; char *p; do { if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) { return UCS_ERR_NO_ELEM; } ++(*line_num); argc = 0; p = strtok(buf, " \t\n\r"); while (p && (argc < MAX_SIZE)) { argv[argc++] = p; p = strtok(NULL, " \t\n\r"); } argv[argc] = NULL; } while ((argc == 0) || (argv[0][0] == '#')); ucs_snprintf_safe(error_prefix, sizeof(error_prefix), "in batch file '%s' line %d: ", file_name, *line_num); optind = 1; while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) { status = parse_test_params(params, c, optarg); if (status != UCS_OK) { ucs_error("%s-%c %s: %s", error_prefix, c, optarg, ucs_status_string(status)); return status; } } status = adjust_test_params(params, error_prefix); if (status != UCS_OK) { return status; } *test_name_p = strdup(argv[0]); return UCS_OK; } static ucs_status_t parse_cpus(char *opt_arg, struct perftest_context *ctx) { char *endptr, *cpu_list = opt_arg; int cpu; ctx->num_cpus = 0; cpu = strtol(cpu_list, &endptr, 10); while (((*endptr == ',') || (*endptr == '\0')) && (ctx->num_cpus < MAX_CPUS)) { if (cpu < 0) { ucs_error("invalid cpu number detected: (%d)", cpu); return UCS_ERR_INVALID_PARAM; } ctx->cpus[ctx->num_cpus++] = cpu; if (*endptr == '\0') { break; } cpu_list = endptr + 1; /* skip the comma */ cpu = strtol(cpu_list, &endptr, 10); } if (*endptr == ',') { ucs_error("number of listed cpus exceeds the maximum supported value (%d)", MAX_CPUS); return UCS_ERR_INVALID_PARAM; } return UCS_OK; } static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized, int argc, char **argv) { ucs_status_t status; int c; ucs_trace_func(""); ucx_perf_global_init(); /* initialize memory types */ status = init_test_params(&ctx->params); if (status != UCS_OK) { return status; } ctx->server_addr = NULL; ctx->num_batch_files = 0; ctx->port = 13337; ctx->flags = 0; ctx->mpi = mpi_initialized; optind = 1; while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) { switch (c) { case 'p': ctx->port = atoi(optarg); break; case 'b': if (ctx->num_batch_files < MAX_BATCH_FILES) { ctx->batch_files[ctx->num_batch_files++] = optarg; } break; case 'N': ctx->flags |= TEST_FLAG_NUMERIC_FMT; break; case 'f': ctx->flags |= TEST_FLAG_PRINT_FINAL; break; case 'v': ctx->flags |= TEST_FLAG_PRINT_CSV; break; case 'c': ctx->flags |= TEST_FLAG_SET_AFFINITY; status = parse_cpus(optarg, ctx); if (status != UCS_OK) { return status; } break; case 'P': #ifdef HAVE_MPI ctx->mpi = atoi(optarg) && mpi_initialized; break; #endif case 'h': usage(ctx, ucs_basename(argv[0])); return UCS_ERR_CANCELED; default: status = parse_test_params(&ctx->params, c, optarg); if (status != UCS_OK) { usage(ctx, ucs_basename(argv[0])); return status; } break; } } if (optind < argc) { ctx->server_addr = argv[optind]; } return UCS_OK; } static unsigned sock_rte_group_size(void *rte_group) { return 2; } static unsigned sock_rte_group_index(void *rte_group) { sock_rte_group_t *group = rte_group; return group->is_server ? 
0 : 1; } static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { sock_rte_group_t *group = rte_group; const unsigned magic = 0xdeadbeef; unsigned snc; snc = magic; safe_send(group->connfd, &snc, sizeof(unsigned), progress, arg); snc = 0; safe_recv(group->connfd, &snc, sizeof(unsigned), progress, arg); ucs_assert(snc == magic); } #pragma omp barrier } static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { sock_rte_group_t *group = rte_group; size_t size; int i; size = 0; for (i = 0; i < iovcnt; ++i) { size += iovec[i].iov_len; } safe_send(group->connfd, &size, sizeof(size), NULL, NULL); for (i = 0; i < iovcnt; ++i) { safe_send(group->connfd, iovec[i].iov_base, iovec[i].iov_len, NULL, NULL); } } static void sock_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { sock_rte_group_t *group = rte_group; int group_index; size_t size; group_index = sock_rte_group_index(rte_group); if (src == group_index) { return; } ucs_assert_always(src == (1 - group_index)); safe_recv(group->connfd, &size, sizeof(size), NULL, NULL); ucs_assert_always(size <= max); safe_recv(group->connfd, buffer, size, NULL, NULL); } static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t sock_rte = { .group_size = sock_rte_group_size, .group_index = sock_rte_group_index, .barrier = sock_rte_barrier, .post_vec = sock_rte_post_vec, .recv = sock_rte_recv, .exchange_vec = (ucx_perf_rte_exchange_vec_func_t)ucs_empty_function, .report = sock_rte_report, }; static ucs_status_t setup_sock_rte(struct perftest_context *ctx) { struct sockaddr_in inaddr; struct hostent *he; ucs_status_t status; int optval = 1; int sockfd, connfd; int ret; sockfd = socket(AF_INET, SOCK_STREAM, 0); if (sockfd < 0) { ucs_error("socket() failed: %m"); status = UCS_ERR_IO_ERROR; goto err; } if (ctx->server_addr == NULL) { optval = 1; status = ucs_socket_setopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); if (status != UCS_OK) { goto err_close_sockfd; } inaddr.sin_family = AF_INET; inaddr.sin_port = htons(ctx->port); inaddr.sin_addr.s_addr = INADDR_ANY; memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("bind() failed: %m"); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } ret = listen(sockfd, 10); if (ret < 0) { ucs_error("listen() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } printf("Waiting for connection...\n"); /* Accept next connection */ connfd = accept(sockfd, NULL, NULL); if (connfd < 0) { ucs_error("accept() failed: %m"); status = UCS_ERR_IO_ERROR; goto err_close_sockfd; } close(sockfd); /* release the memory for the list of the message sizes allocated * during the initialization of the default testing parameters */ free(ctx->params.super.msg_size_list); ctx->params.super.msg_size_list = NULL; ret = safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } if (ctx->params.super.msg_size_cnt != 0) { ctx->params.super.msg_size_list = calloc(ctx->params.super.msg_size_cnt, sizeof(*ctx->params.super.msg_size_list)); if (NULL == ctx->params.super.msg_size_list) { 
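/* could not allocate the buffer for the client's message-size list;
 * propagate the error and close the accepted connection */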
status = UCS_ERR_NO_MEMORY; goto err_close_connfd; } ret = safe_recv(connfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); if (ret) { status = UCS_ERR_IO_ERROR; goto err_close_connfd; } } ctx->sock_rte_group.connfd = connfd; ctx->sock_rte_group.is_server = 1; } else { he = gethostbyname(ctx->server_addr); if (he == NULL || he->h_addr_list == NULL) { ucs_error("host %s not found: %s", ctx->server_addr, hstrerror(h_errno)); status = UCS_ERR_INVALID_ADDR; goto err_close_sockfd; } inaddr.sin_family = he->h_addrtype; inaddr.sin_port = htons(ctx->port); ucs_assert(he->h_length == sizeof(inaddr.sin_addr)); memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length); memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero)); ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr)); if (ret < 0) { ucs_error("connect() failed: %m"); status = UCS_ERR_UNREACHABLE; goto err_close_sockfd; } safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL); if (ctx->params.super.msg_size_cnt != 0) { safe_send(sockfd, ctx->params.super.msg_size_list, sizeof(*ctx->params.super.msg_size_list) * ctx->params.super.msg_size_cnt, NULL, NULL); } ctx->sock_rte_group.connfd = sockfd; ctx->sock_rte_group.is_server = 0; } if (ctx->sock_rte_group.is_server) { ctx->flags |= TEST_FLAG_PRINT_TEST; } else { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = &ctx->sock_rte_group; ctx->params.super.rte = &sock_rte; ctx->params.super.report_arg = ctx; return UCS_OK; err_close_connfd: close(connfd); goto err; err_close_sockfd: close(sockfd); err: return status; } static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx) { close(ctx->sock_rte_group.connfd); return UCS_OK; } #if defined (HAVE_MPI) static unsigned mpi_rte_group_size(void *rte_group) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); return size; } static unsigned mpi_rte_group_index(void *rte_group) { int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); return rank; } static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { int group_size, my_rank, i; MPI_Request *reqs; int nreqs = 0; int dummy; int flag; #pragma omp barrier #pragma omp master { /* * Naive non-blocking barrier implementation over send/recv, to call user * progress while waiting for completion. * Not using MPI_Ibarrier to be compatible with MPI-1. 
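     * The resulting pattern is a fan-in/fan-out: every non-root rank sends
     * a zero-byte "ping" (tag 1) to rank 0 and posts a receive for the
     * "pong" (tag 2); rank 0 gathers all pings before releasing everyone
     * with the pongs.  MPI_Testall rather than MPI_Waitall lets the
     * user-supplied progress callback keep running while the requests
     * complete.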
*/ MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); /* allocate maximal possible number of requests */ reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size); if (my_rank == 0) { /* root gathers "ping" from all other ranks */ for (i = 1; i < group_size; ++i) { MPI_Irecv(&dummy, 0, MPI_INT, i /* source */, 1 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } } else { /* every non-root rank sends "ping" and waits for "pong" */ MPI_Send(&dummy, 0, MPI_INT, 0 /* dest */, 1 /* tag */, MPI_COMM_WORLD); MPI_Irecv(&dummy, 0, MPI_INT, 0 /* source */, 2 /* tag */, MPI_COMM_WORLD, &reqs[nreqs++]); } /* Waiting for receive requests */ do { MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE); progress(arg); } while (!flag); if (my_rank == 0) { /* root sends "pong" to all ranks */ for (i = 1; i < group_size; ++i) { MPI_Send(&dummy, 0, MPI_INT, i /* dest */, 2 /* tag */, MPI_COMM_WORLD); } } } #pragma omp barrier } static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec, int iovcnt, void **req) { int group_size; int my_rank; int dest, i; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &group_size); for (dest = 0; dest < group_size; ++dest) { if (dest == my_rank) { continue; } for (i = 0; i < iovcnt; ++i) { MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest, i == (iovcnt - 1), /* Send last iov with tag == 1 */ MPI_COMM_WORLD); } } *req = (void*)(uintptr_t)1; } static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max, void *req) { MPI_Status status; size_t offset; int my_rank; int count; MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (src == my_rank) { return; } offset = 0; do { ucs_assert_always(offset < max); MPI_Recv(buffer + offset, max - offset, MPI_BYTE, src, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Get_count(&status, MPI_BYTE, &count); offset += count; } while (status.MPI_TAG != 1); } static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } #elif defined (HAVE_RTE) static unsigned ext_rte_group_size(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_size(group); } static unsigned ext_rte_group_index(void *rte_group) { rte_group_t group = (rte_group_t)rte_group; return rte_group_rank(group); } static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg), void *arg) { #pragma omp barrier #pragma omp master { rte_group_t group = (rte_group_t)rte_group; int rc; rc = rte_barrier(group); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_barrier"); } } #pragma omp barrier } static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec, int iovcnt, void **req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session; rte_iovec_t *r_vec; int i, rc; rc = rte_srs_session_create(group, 0, &session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_create"); } r_vec = calloc(iovcnt, sizeof(rte_iovec_t)); if (r_vec == NULL) { return; } for (i = 0; i < iovcnt; ++i) { r_vec[i].iov_base = iovec[i].iov_base; r_vec[i].type = rte_datatype_uint8_t; r_vec[i].count = iovec[i].iov_len; } rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_set_data"); } *req = session; free(r_vec); } static void ext_rte_recv(void *rte_group, unsigned src, void *buffer, 
size_t max, void *req) { rte_group_t group = (rte_group_t)rte_group; rte_srs_session_t session = (rte_srs_session_t)req; void *rte_buffer = NULL; rte_iovec_t r_vec; uint32_t offset; int size; int rc; rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src), "KEY_PERF", &rte_buffer, &size); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_get_data"); return; } r_vec.iov_base = buffer; r_vec.type = rte_datatype_uint8_t; r_vec.count = max; offset = 0; rte_unpack(&r_vec, rte_buffer, &offset); rc = rte_srs_session_destroy(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_session_destroy"); } free(rte_buffer); } static void ext_rte_exchange_vec(void *rte_group, void * req) { rte_srs_session_t session = (rte_srs_session_t)req; int rc; rc = rte_srs_exchange_data(session); if (RTE_SUCCESS != rc) { ucs_error("Failed to rte_srs_exchange_data"); } } static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result, void *arg, int is_final, int is_multi_thread) { struct perftest_context *ctx = arg; print_progress(ctx->test_names, ctx->num_batch_files, result, ctx->flags, is_final, ctx->server_addr == NULL, is_multi_thread); } static ucx_perf_rte_t ext_rte = { .group_size = ext_rte_group_size, .group_index = ext_rte_group_index, .barrier = ext_rte_barrier, .report = ext_rte_report, .post_vec = ext_rte_post_vec, .recv = ext_rte_recv, .exchange_vec = ext_rte_exchange_vec, }; #endif static ucs_status_t setup_mpi_rte(struct perftest_context *ctx) { #if defined (HAVE_MPI) static ucx_perf_rte_t mpi_rte = { .group_size = mpi_rte_group_size, .group_index = mpi_rte_group_index, .barrier = mpi_rte_barrier, .post_vec = mpi_rte_post_vec, .recv = mpi_rte_recv, .exchange_vec = (void*)ucs_empty_function, .report = mpi_rte_report, }; int size, rank; ucs_trace_func(""); MPI_Comm_size(MPI_COMM_WORLD, &size); if (size != 2) { ucs_error("This test should run with exactly 2 processes (actual: %d)", size); return UCS_ERR_INVALID_PARAM; } MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank == 1) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = NULL; ctx->params.super.rte = &mpi_rte; ctx->params.super.report_arg = ctx; #elif defined (HAVE_RTE) ucs_trace_func(""); ctx->params.rte_group = NULL; ctx->params.rte = &mpi_rte; ctx->params.report_arg = ctx; rte_group_t group; rte_init(NULL, NULL, &group); if (1 == rte_group_rank(group)) { ctx->flags |= TEST_FLAG_PRINT_RESULTS; } ctx->params.super.rte_group = group; ctx->params.super.rte = &ext_rte; ctx->params.super.report_arg = ctx; #endif return UCS_OK; } static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx) { #ifdef HAVE_RTE rte_finalize(); #endif return UCS_OK; } static ucs_status_t check_system(struct perftest_context *ctx) { ucs_sys_cpuset_t cpuset; unsigned i, count, nr_cpus; int ret; ucs_trace_func(""); ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) { ucs_error("failed to get local cpu count: %m"); return UCS_ERR_INVALID_PARAM; } nr_cpus = ret; memset(&cpuset, 0, sizeof(cpuset)); if (ctx->flags & TEST_FLAG_SET_AFFINITY) { for (i = 0; i < ctx->num_cpus; i++) { if (ctx->cpus[i] >= nr_cpus) { ucs_error("cpu (%u) out of range (0..%u)", ctx->cpus[i], nr_cpus - 1); return UCS_ERR_INVALID_PARAM; } } for (i = 0; i < ctx->num_cpus; i++) { CPU_SET(ctx->cpus[i], &cpuset); } ret = ucs_sys_setaffinity(&cpuset); if (ret) { ucs_warn("sched_setaffinity() failed: %m"); return UCS_ERR_INVALID_PARAM; } } else { ret = ucs_sys_getaffinity(&cpuset); if (ret) { ucs_warn("sched_getaffinity() failed: %m"); return 
UCS_ERR_INVALID_PARAM; } count = 0; for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cpuset)) { ++count; } } if (count > 2) { ucs_warn("CPU affinity is not set (bound to %u cpus)." " Performance may be impacted.", count); } } return UCS_OK; } static ucs_status_t clone_params(perftest_params_t *dest, const perftest_params_t *src) { size_t msg_size_list_size; *dest = *src; msg_size_list_size = dest->super.msg_size_cnt * sizeof(*dest->super.msg_size_list); dest->super.msg_size_list = malloc(msg_size_list_size); if (dest->super.msg_size_list == NULL) { return ((msg_size_list_size != 0) ? UCS_ERR_NO_MEMORY : UCS_OK); } memcpy(dest->super.msg_size_list, src->super.msg_size_list, msg_size_list_size); return UCS_OK; } static ucs_status_t run_test_recurs(struct perftest_context *ctx, const perftest_params_t *parent_params, unsigned depth) { perftest_params_t params; ucx_perf_result_t result; ucs_status_t status; FILE *batch_file; int line_num; ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files); if (parent_params->super.api == UCX_PERF_API_UCP) { if (strcmp(parent_params->super.uct.dev_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.dev_name); } if (strcmp(parent_params->super.uct.tl_name, TL_RESOURCE_NAME_NONE)) { ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message", parent_params->super.uct.tl_name); } } if (depth >= ctx->num_batch_files) { print_test_name(ctx); return ucx_perf_run(&parent_params->super, &result); } batch_file = fopen(ctx->batch_files[depth], "r"); if (batch_file == NULL) { ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]); return UCS_ERR_IO_ERROR; } line_num = 0; do { status = clone_params(&params, parent_params); if (status != UCS_OK) { goto out; } status = read_batch_file(batch_file, ctx->batch_files[depth], &line_num, &params, &ctx->test_names[depth]); if (status == UCS_OK) { run_test_recurs(ctx, &params, depth + 1); free(ctx->test_names[depth]); ctx->test_names[depth] = NULL; } free(params.super.msg_size_list); params.super.msg_size_list = NULL; } while (status == UCS_OK); if (status == UCS_ERR_NO_ELEM) { status = UCS_OK; } out: fclose(batch_file); return status; } static ucs_status_t run_test(struct perftest_context *ctx) { const char *error_prefix; ucs_status_t status; ucs_trace_func(""); setlocale(LC_ALL, "en_US"); /* no batch files, only command line params */ if (ctx->num_batch_files == 0) { error_prefix = (ctx->flags & TEST_FLAG_PRINT_RESULTS) ? "command line: " : ""; status = adjust_test_params(&ctx->params, error_prefix); if (status != UCS_OK) { return status; } } print_header(ctx); status = run_test_recurs(ctx, &ctx->params, 0); if (status != UCS_OK) { ucs_error("Failed to run test: %s", ucs_status_string(status)); } return status; } int main(int argc, char **argv) { struct perftest_context ctx; ucs_status_t status; int mpi_initialized; int mpi_rte; int ret; #ifdef HAVE_MPI int provided; mpi_initialized = !isatty(0) && /* Using MPI_THREAD_FUNNELED since ucx_perftest supports * using multiple threads when only the main one makes * MPI calls (which is also suitable for a single threaded * run). * MPI_THREAD_FUNNELED: * The process may be multi-threaded, but only the main * thread will make MPI calls (all MPI calls are funneled * to the main thread). 
*/ (MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided) == 0); if (mpi_initialized && (provided != MPI_THREAD_FUNNELED)) { printf("MPI_Init_thread failed to set MPI_THREAD_FUNNELED. (provided = %d)\n", provided); ret = -1; goto out; } #else mpi_initialized = 0; #endif /* Parse command line */ status = parse_opts(&ctx, mpi_initialized, argc, argv); if (status != UCS_OK) { ret = (status == UCS_ERR_CANCELED) ? 0 : -127; goto out_msg_size_list; } #ifdef __COVERITY__ /* coverity[dont_call] */ mpi_rte = rand(); /* Shut up deadcode error */ #endif if (ctx.mpi) { mpi_rte = 1; } else { #ifdef HAVE_RTE mpi_rte = 1; #else mpi_rte = 0; #endif } status = check_system(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Create RTE */ status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx); if (status != UCS_OK) { ret = -1; goto out_msg_size_list; } /* Run the test */ status = run_test(&ctx); if (status != UCS_OK) { ret = -1; goto out_cleanup_rte; } ret = 0; out_cleanup_rte: (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx); out_msg_size_list: free(ctx.params.super.msg_size_list); #if HAVE_MPI out: #endif if (mpi_initialized) { #ifdef HAVE_MPI MPI_Finalize(); #endif } return ret; }
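/*
 * Invocation sketch (illustrative; the binary name and option values are
 * examples, not mandated by this file): start the passive side with no
 * hostname so it listens on the data-exchange TCP port (13337 by default),
 * then point a second process at it.  The listening side receives the
 * client's perftest_params_t over the socket RTE, so the test-selection
 * options only need to be given on the client, which also prints the
 * result rows:
 *
 *     server$ ucx_perftest
 *     client$ ucx_perftest <server-hostname> -t tag_lat -s 64 -n 100000
 *
 * With MPI support compiled in, the same test can instead be launched as
 * a two-rank MPI job, in which case setup_mpi_rte() replaces the socket
 * exchange.
 */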
tree.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/dataset.h> #include <LightGBM/meta.h> #include <string> #include <map> #include <memory> #include <unordered_map> #include <vector> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Constructor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type, bool default_left); /*! * \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { // Prevent denormal values because they can cause std::out_of_range exception when converting strings to doubles if (IsZero(output)) { leaf_value_[leaf] = 0; } else { leaf_value_[leaf] = output; } } /*! 
* \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! * \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the training process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { double new_leaf_value = leaf_value_[i] * rate; // Prevent denormal values because they can cause std::out_of_range exception when converting strings to doubles if (IsZero(new_leaf_value)) { leaf_value_[i] = 0; } else { leaf_value_[i] = new_leaf_value; } } shrinkage_ *= rate; } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { double new_leaf_value = val + leaf_value_[i]; // Prevent denormal values because they can cause std::out_of_range exception when converting strings to doubles if (IsZero(new_leaf_value)) { leaf_value_[i] = 0; } else { leaf_value_[i] = new_leaf_value; } } // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! 
\brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { if (fval > -kZeroThreshold && fval <= kZeroThreshold) { return true; } else { return false; } } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval)) { if (missing_type != 2) { fval = 0.0f; } } if ((missing_type == 1 && IsZero(fval)) || (missing_type == 2 && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == 1 && fval == default_bin) || (missing_type == 2 && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node];; } else if (std::isnan(fval)) { // NaN is always in the right if (missing_type == 2) { return right_child_[node]; } int_fval = 0; } int cat_idx = static_cast<int>(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = static_cast<int>(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain); /*! 
* \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! \brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! \brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current leaves*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and missing value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief weight of leaves */ std::vector<double> leaf_weight_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! 
\brief weight of non-leaf nodes */ std::vector<double> internal_weight_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! \brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = gain; // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_weight_[new_node_idx] = leaf_weight_[leaf]; internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_weight_[leaf] = left_weight; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value; leaf_weight_[num_leaves_] = right_weight; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK(max_depth_ >= 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) 
{ while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
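/* Editor's note: the Tree class above packs three pieces of per-node metadata
 * into one int8_t in decision_type_: bit 0 flags a categorical split
 * (kCategoricalMask), bit 1 the default direction for missing values
 * (kDefaultLeftMask), and bits 2-3 the missing-value type (see
 * GetMissingType/SetMissingType). The self-contained C sketch below mirrors
 * that packing so the masks and shifts can be checked in isolation; the macro
 * names CAT_MASK/DEF_LEFT_MASK are mine and stand in for LightGBM's
 * kCategoricalMask/kDefaultLeftMask. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAT_MASK 1      /* mirrors kCategoricalMask */
#define DEF_LEFT_MASK 2 /* mirrors kDefaultLeftMask */

static void set_decision_type(int8_t *dt, int on, int8_t mask) {
  if (on) *dt |= mask;
  else    *dt &= (int8_t)(127 - mask); /* same clearing trick as SetDecisionType */
}
static int get_decision_type(int8_t dt, int8_t mask) { return (dt & mask) > 0; }
static void set_missing_type(int8_t *dt, int8_t input) {
  *dt &= 3;                   /* keep bits 0-1, clear bits 2-7 (as in Tree) */
  *dt |= (int8_t)(input << 2);
}
static int8_t get_missing_type(int8_t dt) { return (dt >> 2) & 3; }

int main(void) {
  int8_t dt = 0;
  set_decision_type(&dt, 1, CAT_MASK);      /* categorical split */
  set_decision_type(&dt, 1, DEF_LEFT_MASK); /* missing values go left */
  set_missing_type(&dt, 2);                 /* 2 == NaN-style missing (see NumericalDecision) */
  assert(get_decision_type(dt, CAT_MASK));
  assert(get_decision_type(dt, DEF_LEFT_MASK));
  assert(get_missing_type(dt) == 2);
  printf("packed decision_type = 0x%02x\n", (unsigned)(uint8_t)dt);
  return 0;
}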
solver.c
#include"SimpleMOC_header.h" /* Efficient version of attenuate fluxes which determines the change in angular * flux along a particular track across a fine axial region and tallies the * contribution to the scalar flux in the fine axial region. This function * assumes a quadratic source, which is calculated on the fly using neighboring * source values. * * This version decomposes the work into many for loops for efficient SIMD * instructions and to reduce register pressure. For a more descriptive * (but less effiient) version of the code in terms of the underlying physics, * see alt_attenuate_fluxes which solves the problem in a more naive, * straightforward manner. */ void attenuate_fluxes( Track * track, bool forward, Source * QSR, Input * I_in, Params * params_in, float ds, float mu, float az_weight, AttenuateVars * A ) { Input I = *I_in; Params params = *params_in; // unload attenuate vars float * restrict q0 = A->q0; float * restrict q1 = A->q1; float * restrict q2 = A->q2; float * restrict sigT = A->sigT; float * restrict tau = A->tau; float * restrict sigT2 = A->sigT2; float * restrict expVal = A->expVal; float * restrict reuse = A->reuse; float * restrict flux_integral = A->flux_integral; float * restrict tally = A->tally; float * restrict t1 = A->t1; float * restrict t2 = A->t2; float * restrict t3 = A->t3; float * restrict t4 = A->t4; // compute fine axial interval spacing float dz = I.height / (I.fai * I.decomp_assemblies_ax * I.cai); // compute z height in cell float zin = track->z_height - dz * ( (int)( track->z_height / dz ) + 0.5f ); // compute fine axial region ID int fine_id = (int) ( track->z_height / dz ) % I.fai; // compute weight (azimuthal * polar) // NOTE: real app would also have volume weight component float weight = track->p_weight * az_weight; float mu2 = mu * mu; // load fine source region flux vector float * FSR_flux = QSR -> fine_flux[fine_id]; if( fine_id == 0 ) { // adjust z height to account for edge zin -= dz; // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load neighboring sources float y1 = QSR->fine_source[fine_id][g]; float y2 = QSR->fine_source[fine_id+1][g]; float y3 = QSR->fine_source[fine_id+2][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2.f*dz); float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz); // calculate q0, q1, q2 q0[g] = c0 + c1*zin + c2*zin*zin; q1[g] = c1 + 2.f*c2*zin; q2[g] = c2; } } else if ( fine_id == I.fai - 1 ) { // adjust z height to account for edge zin += dz; // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load neighboring sources float y1 = QSR->fine_source[fine_id-2][g]; float y2 = QSR->fine_source[fine_id-1][g]; float y3 = QSR->fine_source[fine_id][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2.f*dz); float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz); // calculate q0, q1, q2 q0[g] = c0 + c1*zin + c2*zin*zin; q1[g] = c1 + 2.f*c2*zin; q2[g] = c2; } } else { // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load neighboring sources float y1 = QSR->fine_source[fine_id-1][g]; float y2 = QSR->fine_source[fine_id][g]; float y3 = QSR->fine_source[fine_id+1][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2.f*dz); float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz); // calculate q0, q1, q2 q0[g] = c0 + 
c1*zin + c2*zin*zin; q1[g] = c1 + 2.f*c2*zin; q2[g] = c2; } } // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // load total cross section sigT[g] = QSR->sigT[g]; // calculate common values for efficiency tau[g] = sigT[g] * ds; sigT2[g] = sigT[g] * sigT[g]; } // cycle over energy groups #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) expVal[g] = interpolateTable( params.expTable, tau[g] ); // Flux Integral // Re-used Term #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { reuse[g] = tau[g] * (tau[g] - 2.f) + 2.f * expVal[g] / (sigT[g] * sigT2[g]); } float * psi; if(forward) psi = track->f_psi; else psi = track->b_psi; //#pragma vector nontemporal #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // add contribution to new source flux flux_integral[g] = (q0[g] * tau[g] + (sigT[g] * psi[g] - q0[g]) * expVal[g]) / sigT2[g] + q1[g] * mu * reuse[g] + q2[g] * mu2 * (tau[g] * (tau[g] * (tau[g] - 3.f) + 6.f) - 6.f * expVal[g]) / (3.f * sigT2[g] * sigT2[g]); } #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { // Prepare tally tally[g] = weight * flux_integral[g]; } #ifdef OPENMP omp_set_lock(QSR->locks + fine_id); #endif #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { FSR_flux[g] += tally[g]; } #ifdef OPENMP omp_unset_lock(QSR->locks + fine_id); #endif // Term 1 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t1[g] = q0[g] * expVal[g] / sigT[g]; } // Term 2 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t2[g] = q1[g] * mu * (tau[g] - expVal[g]) / sigT2[g]; } // Term 3 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t3[g] = q2[g] * mu2 * reuse[g]; } // Term 4 #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { t4[g] = psi[g] * (1.f - expVal[g]); } // Total psi #ifdef INTEL #pragma simd #elif defined IBM #pragma simd_level(10) #endif for( int g = 0; g < I.n_egroups; g++) { psi[g] = t1[g] + t2[g] + t3[g] + t4[g]; } } // single direction transport sweep void transport_sweep( Params * params, Input * I ) { if(I->mype==0) printf("Starting transport sweep ...\n"); // calculate the height of a node's domain and of each FSR double node_delta_z = I->height / I->decomp_assemblies_ax; double fine_delta_z = node_delta_z / (I->cai * I->fai); /* loop over tracks (implicitly azimuthal angles, tracks in azimuthal * angles, polar angles, and z stacked rays) */ //print_Input_struct( I ); long segments_processed = 0; #pragma omp parallel default(none) \ shared( I, params, node_delta_z, fine_delta_z ) \ reduction(+ : segments_processed ) { #ifdef OPENMP int thread = omp_get_thread_num(); int nthreads = omp_get_num_threads(); unsigned int seed = time(NULL) * (thread+1); #endif //print_Input_struct( I ); #ifdef PAPI int eventset = PAPI_NULL; int num_papi_events; #pragma omp critical { counter_init(&eventset, &num_papi_events, I); } #endif AttenuateVars A; float * ptr = (float * ) malloc( I->n_egroups * 14 * sizeof(float)); A.q0 = ptr; ptr += I->n_egroups; 
	A.q1 = ptr; ptr += I->n_egroups;
	A.q2 = ptr; ptr += I->n_egroups;
	A.sigT = ptr; ptr += I->n_egroups;
	A.tau = ptr; ptr += I->n_egroups;
	A.sigT2 = ptr; ptr += I->n_egroups;
	A.expVal = ptr; ptr += I->n_egroups;
	A.reuse = ptr; ptr += I->n_egroups;
	A.flux_integral = ptr; ptr += I->n_egroups;
	A.tally = ptr; ptr += I->n_egroups;
	A.t1 = ptr; ptr += I->n_egroups;
	A.t2 = ptr; ptr += I->n_egroups;
	A.t3 = ptr; ptr += I->n_egroups;
	A.t4 = ptr;

	#pragma omp for schedule( dynamic )
	for (long i = 0; i < I->ntracks_2D; i++)
	{
		// print progress
		#ifdef OPENMP
		if(I->mype==0 && thread == 0)
		{
			printf("\rAttenuating Tracks... (%.0lf%% completed)",
					(i / ( (double)I->ntracks_2D /
					(double) nthreads )) / (double) nthreads * 100.0);
		}
		#else
		if( i % 50 == 0)
			if(I->mype==0)
				printf("%s%ld%s%ld\n","2D Tracks Completed = ", i," / ",
						I->ntracks_2D );
		#endif

		// treat positive-z traveling rays first
		bool pos_z_dir = true;

		for( int j = 0; j < I->n_polar_angles; j++)
		{
			if( j == I->n_polar_angles / 2 )
				pos_z_dir = false;

			float p_angle = params->polar_angles[j];
			float mu = cos(p_angle);

			// start with all z stacked rays
			int begin_stacked = 0;
			int end_stacked = I->z_stacked;

			for( int n = 0; n < params->tracks_2D[i].n_segments; n++)
			{
				// calculate distance traveled in cell if segment completed
				float s_full = params->tracks_2D[i].segments[n].length /
					sin(p_angle);

				// allocate variable for distance traveled in an FSR
				float ds = 0;

				// loop over remaining z-stacked rays
				for( int k = begin_stacked; k < end_stacked; k++)
				{
					// initialize s to full length
					float s = s_full;

					// select current track
					Track * track = &params->tracks[i][j][k];

					// set flag for completion of segment
					bool seg_complete = false;

					// calculate interval
					int curr_interval;
					if( pos_z_dir)
						curr_interval = get_pos_interval(track->z_height,
								fine_delta_z);
					else
						curr_interval = get_neg_interval(track->z_height,
								fine_delta_z);

					while( !seg_complete )
					{
						// flag to reset z position
						bool reset = false;

						/* calculate new height based on s
						 * (distance traveled in FSR) */
						float z = track->z_height + s * cos(p_angle);

						// check if still in same FSR (fine axial interval)
						int new_interval;
						if( pos_z_dir )
							new_interval = get_pos_interval(z, fine_delta_z);
						else
							new_interval = get_neg_interval(z, fine_delta_z);

						if( new_interval == curr_interval )
						{
							seg_complete = true;
							ds = s;
						}
						// otherwise, we need to recalculate distances
						else
						{
							// correct z
							if( pos_z_dir )
							{
								curr_interval++;
								z = fine_delta_z * (float) curr_interval;
							}
							else{
								curr_interval--;
								z = fine_delta_z * (float) curr_interval;
							}

							// calculate distance travelled in FSR (ds)
							ds = (z - track->z_height) / cos(p_angle);

							// update track length remaining
							s -= ds;

							/* check remaining track length to protect
							 * against potential roundoff errors */
							if( s <= 0 )
								seg_complete = true;

							// check if out of bounds or track complete
							if( z <= 0 || z >= node_delta_z )
							{
								// mark segment as completed
								seg_complete = true;

								// remember to no longer treat this track
								if ( pos_z_dir )
									end_stacked--;
								else
									begin_stacked++;

								// reset z height
								reset = true;
							}
						}

						// pick a random FSR (cache miss expected)
						#ifdef OPENMP
						long QSR_id = rand_r(&seed) %
							I->n_source_regions_per_node;
						#else
						long QSR_id = rand() % I->n_source_regions_per_node;
						#endif

						/* update sources and fluxes from attenuation
						 * over FSR */
						if( I->axial_exp == 2 )
						{
							attenuate_fluxes( track, true,
									&params->sources[QSR_id], I, params, ds,
									mu, params->tracks_2D[i].az_weight, &A );
							segments_processed++;
						}
						else if( I->axial_exp == 0 )
						{
							attenuate_FSR_fluxes( track, true,
									&params->sources[QSR_id], I, params, ds,
									mu, params->tracks_2D[i].az_weight, &A );
							segments_processed++;
						}
						else
						{
							printf("Error: invalid axial expansion order");
							printf("\n Please input 0 or 2\n");
							exit(1);
						}

						// update with new z height or reset if finished
						if( n == params->tracks_2D[i].n_segments - 1 || reset)
						{
							if( pos_z_dir)
								track->z_height = I->axial_z_sep * k;
							else
								track->z_height = I->axial_z_sep * (k+1);
						}
						else
							track->z_height = z;
					}
				}
			}
		}
	}

	#ifdef OPENMP
	if(thread == 0 && I->mype==0)
		printf("\n");
	#endif

	#ifdef PAPI
	if( thread == 0 )
	{
		printf("\n");
		border_print();
		center_print("PAPI COUNTER RESULTS", 79);
		border_print();
		printf("Count \tSymbol \tDescription\n");
	}
	{
		#pragma omp barrier
	}
	counter_stop(&eventset, num_papi_events, I);
	#endif
	}
	I->segments_processed = segments_processed;

	return;
}

// run one full two-way (forward and backward) transport sweep
void two_way_transport_sweep( Params * params, Input * I )
{
	if(I->mype==0)
		printf("Starting transport sweep ...\n");

	// calculate the height of a node's domain and of each FSR
	double node_delta_z = I->height / I->decomp_assemblies_ax;
	int num_intervals = (I->cai * I->fai);
	double fine_delta_z = node_delta_z / num_intervals;

	/* loop over tracks (implicitly azimuthal angles, tracks in azimuthal
	 * angles, polar angles, and z stacked rays) */

	long segments_processed = 0;

	#pragma omp parallel default(none) \
	shared( I, params, node_delta_z, fine_delta_z, num_intervals ) \
	reduction(+ : segments_processed )
	{
	#ifdef OPENMP
	int thread = omp_get_thread_num();
	int nthreads = omp_get_num_threads();
	unsigned int seed = time(NULL) * (thread+1);
	#endif

	//print_Input_struct( I );

	#ifdef PAPI
	int eventset = PAPI_NULL;
	int num_papi_events;
	#pragma omp critical
	{
		counter_init(&eventset, &num_papi_events, I);
	}
	#endif

	AttenuateVars A;
	float * ptr = (float * ) malloc( I->n_egroups * 14 * sizeof(float));
	A.q0 = ptr; ptr += I->n_egroups;
	A.q1 = ptr; ptr += I->n_egroups;
	A.q2 = ptr; ptr += I->n_egroups;
	A.sigT = ptr; ptr += I->n_egroups;
	A.tau = ptr; ptr += I->n_egroups;
	A.sigT2 = ptr; ptr += I->n_egroups;
	A.expVal = ptr; ptr += I->n_egroups;
	A.reuse = ptr; ptr += I->n_egroups;
	A.flux_integral = ptr; ptr += I->n_egroups;
	A.tally = ptr; ptr += I->n_egroups;
	A.t1 = ptr; ptr += I->n_egroups;
	A.t2 = ptr; ptr += I->n_egroups;
	A.t3 = ptr; ptr += I->n_egroups;
	A.t4 = ptr;

	#pragma omp for schedule( dynamic )
	for (long i = 0; i < I->ntracks_2D; i++)
	{
		// print progress
		#ifdef OPENMP
		if(I->mype==0 && thread == 0)
		{
			printf("\rAttenuating Tracks... (%.0lf%% completed)",
					(i / ( (double)I->ntracks_2D /
					(double) nthreads )) / (double) nthreads * 100.0);
		}
		#else
		if( i % 50 == 0)
			if(I->mype==0)
				printf("%s%ld%s%ld\n","2D Tracks Completed = ", i," / ",
						I->ntracks_2D );
		#endif

		// allocate arrays for segment storage FIXME
		double ** seg_dist = malloc( I->z_stacked * sizeof(double *) );
		Source *** seg_src = malloc( I->z_stacked * sizeof(Source**) );
		int * seg_idx = malloc( I->z_stacked * sizeof(int) );
		int * seg_size = malloc( I->z_stacked * sizeof(int) );

		// fill matrix with arrays FIXME
		for( int k = 0; k < I->z_stacked; k++)
		{
			seg_size[k] = 2 * I->segments_per_track;
			seg_dist[k] = malloc( seg_size[k] * sizeof(double) );
			seg_src[k] = malloc( seg_size[k] * sizeof(Source *) );
			seg_idx[k] = 0;
		}

		// treat positive-z traveling rays first
		bool pos_z_dir = true;

		for( int j = 0; j < I->n_polar_angles; j++)
		{
			if( j == I->n_polar_angles / 2 )
				pos_z_dir = false;

			float p_angle = params->polar_angles[j];
			float mu = cos(p_angle);

			// start with all z stacked rays
			int begin_stacked = 0;
			int end_stacked = I->z_stacked;

			// reset segment indexes
			for( int k = 0; k < I->z_stacked; k++)
				seg_idx[k] = 0;

			for( int n = 0; n < params->tracks_2D[i].n_segments; n++)
			{
				// calculate distance traveled in cell if segment completed
				float s_full = params->tracks_2D[i].segments[n].length /
					sin(p_angle);

				// allocate variable for distance traveled in an FSR
				float ds = 0;

				// loop over remaining z-stacked rays
				int tracks_completed = 0;
				for( int k = begin_stacked; k < end_stacked; k++)
				{
					// select current track
					Track * track = &params->tracks[i][j][k];

					// determine current axial interval
					// (parenthesized so the cast applies to the quotient)
					int interval = (int) (track->z_height / fine_delta_z);

					// calculate distance to domain boundary
					float bound_dist;
					if( pos_z_dir)
						bound_dist = (node_delta_z - track->z_height) / mu;
					else
						bound_dist = -track->z_height / mu;

					// determine track length
					float s;
					if( s_full < bound_dist )
						s = s_full;
					else
					{
						// note completion of track
						s = bound_dist;
						tracks_completed++;
					}

					// set flag for completion of segment
					bool seg_complete = false;

					while( !seg_complete )
					{
						// initialize tracking variables
						long QSR_id = interval + num_intervals * n;
						float ds;
						float z;

						// calculate z height of next fine axial interval
						float fai_z_height;
						if( pos_z_dir )
							fai_z_height = (interval + 1) * fine_delta_z ;
						else
							fai_z_height = interval * fine_delta_z;

						// calculate z distance to next fine axial interval
						float z_dist_to_fai = fai_z_height - track->z_height;

						/* calculate total distance (s) to fine axial
						 * interval */
						float s_dist_to_fai = z_dist_to_fai / mu;

						// determine if a fine axial interval is crossed
						if( s_dist_to_fai < s )
						{
							if( pos_z_dir )
								interval++;
							else
								interval--;
							ds = s_dist_to_fai;
							z = track->z_height + z_dist_to_fai;
						}
						else
						{
							ds = s;
							z = track->z_height + s * mu;
						}

						/* shorten remaining segment length and check if
						 * completed (accounting for potential roundoff) */
						s -= ds;
						if( s <= 0 || interval < 0 ||
								interval >= num_intervals)
							seg_complete = true;

						// pick a random FSR (cache miss expected)
						#ifdef OPENMP
						QSR_id = rand_r(&seed) %
							I->n_source_regions_per_node;
						#else
						QSR_id = rand() % I->n_source_regions_per_node;
						#endif

						/* update sources and fluxes from attenuation
						 * over FSR */
						if( I->axial_exp == 2 )
						{
							attenuate_fluxes( track, true,
									&params->sources[QSR_id], I, params,
									ds, mu,
									params->tracks_2D[i].az_weight, &A );
							segments_processed++;
						}
						else if( I->axial_exp == 0 )
							attenuate_FSR_fluxes( track, true,
									&params->sources[QSR_id], I, params,
									ds, mu,
									params->tracks_2D[i].az_weight, &A );
						else
						{
							printf("Error: invalid axial expansion order");
							printf("\n Please input 0 or 2\n");
							exit(1);
						}

						// update track height
						track->z_height = z;

						// save segment length and source FIXME
						seg_dist[k][seg_idx[k]] = ds;
						seg_src[k][seg_idx[k]] = &params->sources[QSR_id];
						seg_idx[k]++;

						// check if array needs to grow FIXME
						if( seg_idx[k] >= seg_size[k] )
						{
							seg_size[k] *= 2;
							seg_dist[k] = (double *) realloc( seg_dist[k],
									seg_size[k] * sizeof(double) );
							seg_src[k] = (Source **) realloc( seg_src[k],
									seg_size[k] * sizeof(Source *) );
						}
					}
				}
				if(pos_z_dir)
					end_stacked -= tracks_completed;
				else
					begin_stacked += tracks_completed;
			}

			// loop over all z stacked rays again
			for( int k = 0; k < I->z_stacked; k++ )
			{
				for( int n = seg_idx[k]-1; n >= 0; n--)
				{
					// load distance
					float ds = seg_dist[k][n];

					// select current track
					Track * track = &params->tracks[i][j][k];

					// update sources and fluxes from attenuation over FSR
					if( I->axial_exp == 2 )
					{
						attenuate_fluxes( track, false, seg_src[k][n], I,
								params, ds, -mu,
								params->tracks_2D[i].az_weight, &A );
						segments_processed++;
					}
					else if( I->axial_exp == 0 )
						attenuate_FSR_fluxes( track, false, seg_src[k][n],
								I, params, ds, -mu,
								params->tracks_2D[i].az_weight, &A );

					// update z height
					track->z_height -= ds * mu;
				}
			}

			/* Update all tracks with correct starting z location again
			 * NOTE: this is only here to account for roundoff error */
			for( int k = 0; k < I->z_stacked; k++)
			{
				Track * track = &params->tracks[i][j][k];
				if( pos_z_dir)
					track->z_height = I->axial_z_sep * k;
				else
					track->z_height = I->axial_z_sep * (k+1);
			}
		}

		// free memory
		for( int k = 0; k < I->z_stacked; k++)
		{
			free(seg_dist[k]);
			free(seg_src[k]);
		}
		free(seg_dist);
		free(seg_src);
		free(seg_idx);
		free(seg_size);
	}

	#ifdef OPENMP
	if(thread == 0 && I->mype==0)
		printf("\n");
	#endif

	#ifdef PAPI
	if( thread == 0 )
	{
		printf("\n");
		border_print();
		center_print("PAPI COUNTER RESULTS", 79);
		border_print();
		printf("Count \tSymbol \tDescription\n");
	}
	{
		#pragma omp barrier
	}
	counter_stop(&eventset, num_papi_events, I);
	#endif
	}

	//printf("Number of segments processed: %ld\n", segments_processed);
	I->segments_processed = segments_processed;

	return;
}

/* returns integer number for axial interval for tracks traveling in the
 * positive direction */
int get_pos_interval( float z, float dz)
{
	int interval = (int) (z/dz);
	return interval;
}

/* returns integer number for axial interval for tracks traveling in the
 * negative direction */
int get_neg_interval( float z, float dz)
{
	int interval = ceil(z/dz);
	return interval;
}

int calc_next_fai( float z, float dz, bool pos_dir)
{
	int interval = z/dz;
	float lower_z = dz * (float) interval;
	if(pos_dir)
		return interval + 1;
	else
		return interval;
}

/* Determines the change in angular flux along a particular track across a fine
 * axial region and tallies the contribution to the scalar flux in the fine
 * axial region. This function assumes a quadratic source, which is calculated
 * on the fly using neighboring source values.
 *
 * This legacy function is unused since it is less efficient than the current
 * attenuate_fluxes function. However, it provides a more straightforward
 * description of the underlying physical problem.
*/ void alt_attenuate_fluxes( Track * track, bool forward, Source * QSR, Input * I, Params * params, float ds, float mu, float az_weight ) { // compute fine axial interval spacing float dz = I->height / (I->fai * I->decomp_assemblies_ax * I->cai); // compute z height in cell float zin = track->z_height - dz * ( (int)( track->z_height / dz ) + 0.5 ); // compute fine axial region ID int fine_id = (int) ( track->z_height / dz ) % I->fai; // compute weight (azimuthal * polar) // NOTE: real app would also have volume weight component float weight = track->p_weight * az_weight; float mu2 = mu * mu; // load fine source region flux vector float * FSR_flux = QSR -> fine_flux[fine_id]; // cycle over energy groups for( int g = 0; g < I->n_egroups; g++) { // load total cross section float sigT = QSR->sigT[g]; // define source parameters float q0, q1, q2; // calculate source components if( fine_id == 0 ) { // load neighboring sources float y2 = QSR->fine_source[fine_id][g]; float y3 = QSR->fine_source[fine_id+1][g]; // do linear "fitting" float c0 = y2; float c1 = (y3 - y2) / dz; // calculate q0, q1, q2 q0 = c0 + c1*zin; q1 = c1; q2 = 0; } else if( fine_id == I->fai - 1 ) { // load neighboring sources float y1 = QSR->fine_source[fine_id-1][g]; float y2 = QSR->fine_source[fine_id][g]; // do linear "fitting" float c0 = y2; float c1 = (y2 - y1) / dz; // calculate q0, q1, q2 q0 = c0 + c1*zin; q1 = c1; q2 = 0; } else { // load neighboring sources float y1 = QSR->fine_source[fine_id-1][g]; float y2 = QSR->fine_source[fine_id][g]; float y3 = QSR->fine_source[fine_id+1][g]; // do quadratic "fitting" float c0 = y2; float c1 = (y1 - y3) / (2*dz); float c2 = (y1 - 2*y2 + y3) / (2*dz*dz); // calculate q0, q1, q2 q0 = c0 + c1*zin + c2*zin*zin; q1 = c1 + 2*c2*zin; q2 = c2; } // calculate common values for efficiency float tau = sigT * ds; float sigT2 = sigT * sigT; // compute exponential ( 1 - exp(-x) ) using table lookup float expVal = interpolateTable( params->expTable, tau ); // load correct angular flux vector float * psi; if(forward) psi = track->f_psi; else psi = track->b_psi; // add contribution to new source flux float flux_integral = (q0 * tau + (sigT * psi[g] - q0) * expVal) / sigT2 + q1 * mu * (tau * (tau - 2) + 2 * expVal) / (sigT * sigT2) + q2 * mu2 * (tau * (tau * (tau - 3) + 6) - 6 * expVal) / (3 * sigT2 * sigT2); #pragma omp atomic FSR_flux[g] += weight * flux_integral; // update angular flux psi[g] = psi[g] * (1.0 - expVal) + q0 * expVal / sigT + q1 * mu * (tau - expVal) / sigT2 + q2 * mu2 * (tau * (tau - 2) + 2 * expVal) / (sigT2 * sigT); } } /* Determines the change in angular flux along a particular track across a fine * axial region and tallies the contribution to the scalar flux in the fine * axial region. This function assumes a constant source. 
 */
void attenuate_FSR_fluxes( Track * track, bool forward, Source * FSR,
		Input * I, Params * params_in, float ds, float mu, float az_weight,
		AttenuateVars *A)
{
	// unpack attenuate vars struct
	float * restrict tally = A->tally;
	float * restrict expVal = A->expVal;
	float * restrict sigT = A->sigT;
	float * restrict tau = A->tau;

	Params params = * params_in;

	// compute fine axial interval spacing
	float dz = I->height / (I->fai * I->decomp_assemblies_ax * I->cai);

	// compute z height in cell
	float zin = track->z_height - dz * ( (int)( track->z_height / dz ) + 0.5f );

	// compute fine axial region ID
	int fine_id = (int) ( track->z_height / dz ) % I->fai;

	// compute weight (azimuthal * polar)
	// NOTE: real app would also have volume weight component
	float weight = track->p_weight * az_weight * mu;

	// load fine source region flux vector
	float * FSR_flux = FSR -> fine_flux[fine_id];

	// cycle over energy groups
	#ifdef INTEL
	#pragma simd
	#elif defined IBM
	#pragma simd_level(10)
	#endif
	for( int g = 0; g < I->n_egroups; g++)
	{
		// load total cross section
		sigT[g] = FSR->sigT[g];
		tau[g] = sigT[g] * ds;
	}

	// compute exponential ( 1 - exp(-x) ) using table lookup
	#ifdef INTEL
	#pragma simd
	#elif defined IBM
	#pragma simd_level(10)
	#endif
	for(int g = 0; g < I->n_egroups; g++)
	{
		expVal[g] = interpolateTable( params.expTable, tau[g] );
	}

	float * psi;
	if(forward)
		psi = track->f_psi;
	else
		psi = track->b_psi;

	#ifdef INTEL
	#pragma simd
	#elif defined IBM
	#pragma simd_level(10)
	#endif
	for( int g = 0; g < I->n_egroups; g++)
	{
		// compute angular flux attenuation
		float q = FSR->fine_source[fine_id][g] / sigT[g];
		float delta_psi = (psi[g] - q) * expVal[g];

		// add contribution to new source flux
		tally[g] = weight * delta_psi;

		// update angular flux
		psi[g] -= delta_psi;
	}

	#ifdef OPENMP
	omp_set_lock(&FSR->locks[fine_id]);
	#endif

	#ifdef INTEL
	#pragma simd
	#elif defined IBM
	#pragma simd_level(10)
	#endif
	for( int g = 0; g < I->n_egroups; g++)
	{
		FSR_flux[g] += tally[g];
	}

	#ifdef OPENMP
	omp_unset_lock(&FSR->locks[fine_id]);
	#endif
}

/* Renormalizes scalar and angular flux for next transport sweep iteration.
 * Calculation requires multiple pair-wise sums and a reduction across all
 * nodes. */
void renormalize_flux( Params params, Input I, CommGrid grid )
{
	if( I.mype == 0 )
		printf("Renormalizing Flux...\n");

	float node_fission_rate = 0;

	#ifdef OPENMP
	#pragma omp parallel default(none) shared(params, I, grid) \
	reduction(+ : node_fission_rate)
	{
	#endif

	/* tally total fission rate (pair-wise sum)
	 * NOTE: fission_rates must start zeroed (calloc): with OpenMP each
	 * thread fills only the iterations it owns before summing its whole
	 * thread-private array */
	float * fission_rates = calloc( I.n_source_regions_per_node,
			sizeof(float) );
	float * fine_fission_rates = malloc( I.fai * sizeof(float) );
	float * g_fission_rates = malloc( I.n_egroups * sizeof(float) );

	// accumulate total fission rate on node domain
	#pragma omp for schedule(dynamic)
	for( int i = 0; i < I.n_source_regions_per_node; i++)
	{
		Source src = params.sources[i];
		for( int j = 0; j < I.fai; j++)
		{
			for( int g = 0; g < I.n_egroups; g++)
				g_fission_rates[g] = src.fine_flux[j][g] * src.vol *
					src.XS[g][0];
			fine_fission_rates[j] = pairwise_sum( g_fission_rates,
					I.n_egroups );
		}
		fission_rates[i] = pairwise_sum( fine_fission_rates, I.fai );
	}
	node_fission_rate = pairwise_sum(fission_rates,
			I.n_source_regions_per_node);

	// free allocated memory
	free(fission_rates);
	free(fine_fission_rates);
	free(g_fission_rates);

	#ifdef OPENMP
	}
	#endif

	#ifdef MPI
	// accumulate total fission rate by MPI Allreduce
	float total_fission_rate = 0;
	MPI_Barrier(grid.cart_comm_3d);
	MPI_Allreduce( &node_fission_rate, // Send Buffer
			&total_fission_rate,       // Receive Buffer
			1,                         // Element Count
			MPI_FLOAT,                 // Element Type
			MPI_SUM,                   // Reduction Operation Type
			grid.cart_comm_3d );       // MPI Communicator
	MPI_Barrier(grid.cart_comm_3d);
	#else
	float total_fission_rate = node_fission_rate;
	#endif

	// normalize fluxes by fission reaction rate
	float norm_factor = 1.0 / total_fission_rate;

	/* NOTE: norm_factor is firstprivate so each thread receives an
	 * initialized copy (a plain private copy would be indeterminate) */
	#pragma omp parallel for default(none) \
	shared(I, params) firstprivate(norm_factor) schedule(dynamic)
	for( int i = 0; i < I.n_source_regions_per_node; i++)
	{
		Source * src = &params.sources[i];
		float adjust = norm_factor * 4 * M_PI * I.fai / src->vol;
		for( int k = 0; k < I.fai; k++)
			for( int g = 0; g < I.n_egroups; g++)
				src->fine_flux[k][g] *= adjust;
	}

	// normalize boundary fluxes by same factor
	#pragma omp parallel for default(none) \
	shared(I, params) firstprivate(norm_factor) schedule(dynamic)
	for( long i = 0; i < I.ntracks_2D; i++)
		for( int j = 0; j < I.n_polar_angles; j++)
			for( int k = 0; k < I.z_stacked; k++)
				for( int g = 0; g < I.n_egroups; g++)
				{
					params.tracks[i][j][k].f_psi[g] *= norm_factor;
					params.tracks[i][j][k].b_psi[g] *= norm_factor;
				}

	if( I.mype == 0 )
		printf("Renormalizing Flux Complete.\n");

	return;
}

/* Updates sources for next iteration by computing scattering and fission
 * components. Calculation includes multiple pair-wise sums and reductions
 * across all nodes */
float update_sources( Params params, Input I, float keff )
{
	// source residual
	float residual;

	// calculate inverse multiplication factor for efficiency
	float inverse_k = 1.0 / keff;

	// allocate residual arrays
	float * group_res = (float *) malloc(I.n_egroups * sizeof(float));
	float * fine_res = (float *) malloc(I.fai * sizeof(float));
	float * residuals = (float *) malloc(I.n_source_regions_per_node *
			sizeof(float));

	// allocate arrays for summation
	float * fission_rates = malloc(I.n_egroups * sizeof(float));
	float * scatter_rates = malloc(I.n_egroups * sizeof(float));

	// cycle through all coarse axial intervals to update source
	for( long i = 0; i < I.n_source_regions_per_node; i++)
	{
		Source src = params.sources[i];

		// cycle through all fine axial regions to calculate new source
		for( int j = 0; j < I.fai; j++)
		{
			// calculate total fission source and scattering source
			float fission_source;
			float scatter_source;

			// compute total fission source
			for( int g = 0; g < I.n_egroups; g++ )
				fission_rates[g] = src.fine_flux[j][g] * src.XS[g][0];
			fission_source = pairwise_sum( fission_rates,
					(long) I.n_egroups);

			// normalize fission source by multiplication factor
			fission_source *= inverse_k;

			// compute scattering and new total source for each group
			for( int g = 0; g < I.n_egroups; g++ )
			{
				for( int g2 = 0; g2 < I.n_egroups; g2++ )
				{
					// compute scatter source originating from g2 -> g
					scatter_rates[g2] = src.scattering_matrix[g][g2] *
						src.fine_flux[j][g2];
				}
				scatter_source = pairwise_sum(scatter_rates,
						(long) I.n_egroups);

				// compute new total source
				float chi = src.XS[g][2];

				// calculate new fine source
				float newSrc = (fission_source * chi + scatter_source) /
					(4.0 * M_PI);

				// calculate residual
				float oldSrc = src.fine_source[j][g];
				group_res[g] = (newSrc - oldSrc) * (newSrc - oldSrc) /
					(oldSrc * oldSrc);

				/* calculate new source in fine axial interval assuming
				 * isotropic source components */
				src.fine_source[j][g] = newSrc;
			}
			fine_res[j] = pairwise_sum(group_res, (long) I.n_egroups);
		}
		residuals[i] = pairwise_sum(fine_res, (long) I.fai);
	}

	// calculate source residual
	residual = pairwise_sum(residuals, I.n_source_regions_per_node);

	// free memory
	free(fission_rates);
	free(scatter_rates);
	free(group_res);
	free(fine_res);
	free(residuals);

	// NOTE: See code around line 600 of CPUSolver.cpp in ClosedMOC / OpenMOC

	return residual;
}

/* Computes global k-effective using multiple pair-wise summations and finally
 * a reduction across all nodes */
float compute_keff(Params params, Input I, CommGrid grid)
{
	// allocate temporary memory
	float * sigma = malloc( I.n_egroups * sizeof(float) );
	float * group_rates = malloc( I.n_egroups * sizeof(float) );
	float * fine_rates = malloc( I.fai * sizeof(float) );
	float * QSR_rates = malloc( I.n_source_regions_per_node * sizeof(float) );

	///////////////////////////////////////////////////////////////////////////

	// compute total absorption rate, looping over source regions
	for( long i = 0; i < I.n_source_regions_per_node; i++)
	{
		// load absorption XS data
		Source src = params.sources[i];
		for( int g = 0; g < I.n_egroups; g++)
			sigma[g] = src.XS[g][1];

		for( int j = 0; j < I.fai; j++ )
		{
			// calculate absorption rates
			float * fine_flux = src.fine_flux[j];
			for( int g = 0; g < I.n_egroups; g++)
				group_rates[g] = sigma[g] * fine_flux[g];

			// sum absorption over all energy groups
			fine_rates[j] = pairwise_sum( group_rates, (long) I.n_egroups );
		}
		// sum absorption over all fine axial intervals
		QSR_rates[i] = pairwise_sum( fine_rates, (long) I.fai );
	}

	// sum absorption over all source regions in a node
	float node_abs = pairwise_sum( QSR_rates, I.n_source_regions_per_node);

	///////////////////////////////////////////////////////////////////////////

	// compute total fission rate, looping over source regions
	for( long i = 0; i < I.n_source_regions_per_node; i++)
	{
		// load nuSigmaF XS data
		Source src = params.sources[i];
		for( int g = 0; g < I.n_egroups; g++)
			sigma[g] = src.XS[g][0];

		for( int j = 0; j < I.fai; j++ )
		{
			// calculate fission rates
			float * fine_flux = src.fine_flux[j];
			for( int g = 0; g < I.n_egroups; g++)
				group_rates[g] = sigma[g] * fine_flux[g];

			// sum fission over all energy groups
			fine_rates[j] = pairwise_sum( group_rates, (long) I.n_egroups );
		}
		// sum fission over all fine axial intervals
		QSR_rates[i] = pairwise_sum( fine_rates, (long) I.fai );
	}

	// sum fission over all source regions in a node
	float node_fission = pairwise_sum( QSR_rates, I.n_source_regions_per_node);

	///////////////////////////////////////////////////////////////////////////

	// MPI Reduction
	float tot_abs = 0;
	float tot_fission = 0;
	float leakage = 0;

	#ifdef MPI
	// Total Absorption Reduction
	MPI_Reduce( &node_abs,       // Send Buffer
			&tot_abs,            // Receive Buffer
			1,                   // Element Count
			MPI_FLOAT,           // Element Type
			MPI_SUM,             // Reduction Operation Type
			0,                   // Master Rank
			grid.cart_comm_3d ); // MPI Communicator

	// Total Fission Reduction
	MPI_Reduce( &node_fission,   // Send Buffer
			&tot_fission,        // Receive Buffer
			1,                   // Element Count
			MPI_FLOAT,           // Element Type
			MPI_SUM,             // Reduction Operation Type
			0,                   // Master Rank
			grid.cart_comm_3d ); // MPI Communicator

	// Total Leakage Reduction
	MPI_Reduce( params.leakage,  // Send Buffer
			&leakage,            // Receive Buffer
			1,                   // Element Count
			MPI_FLOAT,           // Element Type
			MPI_SUM,             // Reduction Operation Type
			0,                   // Master Rank
			grid.cart_comm_3d ); // MPI Communicator

	MPI_Barrier(grid.cart_comm_3d);

	// calculate keff
	float keff = tot_fission / (tot_abs + leakage);
	#else
	float keff = node_fission / (node_abs + *params.leakage);
	#endif

	///////////////////////////////////////////////////////////////////////////

	// free memory
	free(sigma);
	free(group_rates);
	free(fine_rates);
	free(QSR_rates);

	return keff;
}

/* Interpolates a formed exponential table to compute ( 1 - exp(-x) )
 * at the desired x value */
float interpolateTable( Table table, float x)
{
	// check to ensure value is in domain
	if( x > table.maxVal )
		return 1.0f;
	else
	{
		int interval = (int) ( x / table.dx + 0.5f * table.dx );
		/*
		if( interval >= table.N || interval < 0)
		{
			printf( "Interval = %d\n", interval);
			printf( "N = %d\n", table.N);
			printf( "x = %f\n", x);
			printf( "dx = %f\n", table.dx);
			exit(1);
		}
		*/
		float slope = table.values[ 2 * interval ];
		float intercept = table.values[ 2 * interval + 1 ];
		float val = slope * x + intercept;
		return val;
	}
}
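/* Editor's sketch: interpolateTable() above consumes a Table whose `values`
 * array stores a (slope, intercept) pair per interval of width dx, i.e. a
 * piecewise-linear fit of F(x) = 1 - exp(-x) on [0, maxVal]. The builder
 * itself lives elsewhere in SimpleMOC and is not shown in this file, so the
 * helper below is a hedged reconstruction that merely matches the layout the
 * lookup expects (field names dx, maxVal, N, values are taken from the lookup
 * code; the struct and function names here are mine). Note the lookup's index
 * expression includes a small 0.5f*dx bias, which is negligible for small dx
 * relative to a plain x/dx index. */
#include <math.h>

typedef struct {
	float dx;
	float maxVal;
	int N;
	float * values; /* 2*N floats: slope at [2i], intercept at [2i+1] */
} ExpTableSketch;

/* buffer must hold 2*N floats and is owned by the caller,
 * e.g.: float buf[2*1000]; build_exp_table_sketch(&t, 10.f, 1000, buf); */
void build_exp_table_sketch( ExpTableSketch * t, float maxVal, int N,
		float * buffer )
{
	t->maxVal = maxVal;
	t->N = N;
	t->dx = maxVal / (float) N;
	t->values = buffer;
	for( int i = 0; i < N; i++ )
	{
		float x1 = i * t->dx;
		float x2 = x1 + t->dx;
		float y1 = 1.f - expf(-x1);
		float y2 = 1.f - expf(-x2);
		float slope = (y2 - y1) / t->dx;      /* secant slope on [x1, x2] */
		t->values[2*i] = slope;
		t->values[2*i + 1] = y1 - slope * x1; /* intercept: val = slope*x + b */
	}
}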
16_omp_heap.c
// clang-format off // RUN: %c-to-llvm %omp_c_flags %s | %apply-typeart -S 2>&1 | FileCheck %s // REQUIRES: openmp // clang-format on #include <stdlib.h> void foo(int** x) { #pragma omp parallel // transformed to @__kmpc_fork_call { double* pd = calloc(10, sizeof(double)); pd = realloc(pd, 20 * sizeof(double)); } #pragma omp parallel for for (int i = 0; i < 10; ++i) { x[i] = (int*)malloc(8 * sizeof(int)); free(x[i]); } } // CHECK: [[POINTER:%[0-9]+]] = call noalias i8* @calloc(i64 [[SIZE:[0-9]+]], i64 8) // CHECK-NEXT: call void @__typeart_alloc_omp(i8* [[POINTER]], i32 6, i64 [[SIZE]]) // CHECK-NEXT: bitcast i8* [[POINTER]] to double* // CHECK: __typeart_free_omp(i8* [[POINTER:%[0-9]+]]) // CHECK-NEXT: [[POINTER2:%[0-9]+]] = call i8* @realloc(i8* [[POINTER]], i64 160) // CHECK-NEXT: __typeart_alloc_omp(i8* [[POINTER2]], i32 6, i64 20) // CHECK: [[POINTER:%[0-9]+]] = call noalias i8* @malloc // CHECK-NEXT: call void @__typeart_alloc_omp(i8* [[POINTER]], i32 2, i64 8) // CHECK-NEXT: bitcast i8* [[POINTER]] to i32* // CHECK: call void @free // CHECK-NEXT: call void @__typeart_free_omp // CHECK: TypeArtPass [Heap] // CHECK-NEXT: Malloc{{[ ]*}}:{{[ ]*}}3 // CHECK-NEXT: Free{{[ ]*}}:{{[ ]*}}1 // CHECK-NEXT: Alloca{{[ ]*}}:{{[ ]*}}0
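/* Editor's note: the FileCheck lines above spell out the runtime contract this
 * test verifies: each heap allocation inside an OpenMP region is followed by a
 * call to __typeart_alloc_omp(ptr, type_id, element_count), and each free (or
 * the free half of a realloc) by __typeart_free_omp(ptr). The mock below only
 * makes that call sequence concrete and printable; the real callbacks live in
 * the TypeArt runtime, and the type ids (6 = double, 2 = i32) and counts are
 * taken from the CHECK lines, not from TypeArt documentation. */
#include <stdio.h>
#include <stdlib.h>

static void mock_typeart_alloc_omp(void *addr, int type_id, long count) {
  printf("__typeart_alloc_omp(%p, type=%d, count=%ld)\n", addr, type_id, count);
}
static void mock_typeart_free_omp(void *addr) {
  printf("__typeart_free_omp(%p)\n", addr);
}

int main(void) {
  double *pd = calloc(10, sizeof(double));
  mock_typeart_alloc_omp(pd, 6, 10); /* calloc(10, 8) -> 10 doubles */
  void *old = pd;
  pd = realloc(pd, 20 * sizeof(double));
  mock_typeart_free_omp(old);        /* realloc is tracked as free + alloc */
  mock_typeart_alloc_omp(pd, 6, 20); /* 160 bytes / 8 = 20 elements */
  mock_typeart_free_omp(pd);
  free(pd);
  return 0;
}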
chemm.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zhemm.c, normal z -> c, Fri Sep 28 17:38:06 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_hemm * * Performs one of the matrix-matrix operations * * \f[ C = \alpha \times A \times B + \beta \times C \f] * or * \f[ C = \alpha \times B \times A + \beta \times C \f] * * where alpha and beta are scalars, A is a Hermitian matrix and B and * C are m-by-n matrices. * ******************************************************************************* * * @param[in] side * Specifies whether the Hermitian matrix A appears on the * left or right in the operation as follows: * - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f] * - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f] * * @param[in] uplo * Specifies whether the upper or lower triangular part of * the Hermitian matrix A is to be referenced as follows: * - PlasmaLower: Only the lower triangular part of the * Hermitian matrix A is to be referenced. * - PlasmaUpper: Only the upper triangular part of the * Hermitian matrix A is to be referenced. * * @param[in] m * The number of rows of the matrix C. m >= 0. * * @param[in] n * The number of columns of the matrix C. n >= 0. * * @param[in] alpha * The scalar alpha. * * @param[in] pA * A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft, * and is n otherwise. Only the uplo triangular part is referenced. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,ka). * * @param[in] pB * B is an ldb-by-n matrix, where the leading m-by-n part of * the array B must contain the matrix B. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,m). * * @param[in] beta * The scalar beta. * * @param[in,out] pC * C is an ldc-by-n matrix. * On exit, the array is overwritten by the m-by-n updated matrix. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1,m). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_chemm * @sa plasma_chemm * ******************************************************************************/ int plasma_chemm(plasma_enum_t side, plasma_enum_t uplo, int m, int n, plasma_complex32_t alpha, plasma_complex32_t *pA, int lda, plasma_complex32_t *pB, int ldb, plasma_complex32_t beta, plasma_complex32_t *pC, int ldc) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. 
if ((side != PlasmaLeft) && (side != PlasmaRight)) { plasma_error("illegal value of side"); return -1; } if ((uplo != PlasmaLower) && (uplo != PlasmaUpper)) { plasma_error("illegal value of uplo"); return -2; } if (m < 0) { plasma_error("illegal value of m"); return -3; } if (n < 0) { plasma_error("illegal value of n"); return -4; } int am; if (side == PlasmaLeft) { am = m; } else { am = n; } if (lda < imax(1, am)) { plasma_error("illegal value of lda"); return -7; } if (ldb < imax(1, m)) { plasma_error("illegal value of ldb"); return -9; } if (ldc < imax(1, m)) { plasma_error("illegal value of ldc"); return -12; } // quick return if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0)) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_symm(plasma, PlasmaComplexFloat, m, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; plasma_desc_t B; plasma_desc_t C; int retval; retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, am, am, 0, 0, am, am, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, m, n, 0, 0, m, n, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, m, n, 0, 0, m, n, &C); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_cge2desc(pA, lda, A, &sequence, &request); plasma_omp_cge2desc(pB, ldb, B, &sequence, &request); plasma_omp_cge2desc(pC, ldc, C, &sequence, &request); // Call the tile async function. plasma_omp_chemm(side, uplo, alpha, A, B, beta, C, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&C); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * @ingroup plasma_hemm * * Performs Hermitian matrix multiplication. * Non-blocking tile version of plasma_chemm(). * May return before the computation is finished. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] side * Specifies whether the Hermitian matrix A appears on the * left or right in the operation as follows: * - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f] * - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f] * * @param[in] uplo * Specifies whether the upper or lower triangular part of * the Hermitian matrix A is to be referenced as follows: * - PlasmaLower: Only the lower triangular part of the * Hermitian matrix A is to be referenced. * - PlasmaUpper: Only the upper triangular part of the * Hermitian matrix A is to be referenced. * * @param[in] alpha * The scalar alpha. * * @param[in] A * Descriptor of matrix A. * * @param[in] B * Descriptor of matrix B. 
* * @param[in] beta * The scalar beta. * * @param[in,out] C * Descriptor of matrix C. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * ******************************************************************************* * * @sa plasma_chemm * @sa plasma_omp_chemm * ******************************************************************************/ void plasma_omp_chemm(plasma_enum_t side, plasma_enum_t uplo, plasma_complex32_t alpha, plasma_desc_t A, plasma_desc_t B, plasma_complex32_t beta, plasma_desc_t C, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if ((side != PlasmaLeft) && (side != PlasmaRight)) { plasma_error("illegal value of side"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((uplo != PlasmaLower) && (uplo != PlasmaUpper)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(C) != PlasmaSuccess) { plasma_error("invalid C"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || A.n == 0) && beta == 1.0)) return; // Call the parallel function. plasma_pchemm(side, uplo, alpha, A, B, beta, C, sequence, request); }
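/* Editor's sketch: a minimal driver for plasma_chemm() above, computing
 * C = alpha*A*B + beta*C with the Hermitian A applied from the left. Only
 * plasma_chemm's signature and the PlasmaLeft/PlasmaLower enums are certain
 * from the code above; plasma_init()/plasma_finalize() are assumed to be the
 * library's usual context setup/teardown entry points, and plasma_complex32_t
 * is treated as C99 `float complex`. Not a definitive usage recipe. */
#include <complex.h>
#include <stdlib.h>

int chemm_demo(void)
{
    int m = 100, n = 50;
    plasma_complex32_t alpha = 1.0f;
    plasma_complex32_t beta  = 0.0f;

    // column-major storage: A is m-by-m (lda = m), B and C are m-by-n
    plasma_complex32_t *A = calloc((size_t)m*m, sizeof(plasma_complex32_t));
    plasma_complex32_t *B = calloc((size_t)m*n, sizeof(plasma_complex32_t));
    plasma_complex32_t *C = calloc((size_t)m*n, sizeof(plasma_complex32_t));

    for (int i = 0; i < m; i++)
        A[i + (size_t)i*m] = 2.0f;       // Hermitian A = 2*I; lower part referenced
    for (int j = 0; j < n; j++)
        for (int i = 0; i < m; i++)
            B[i + (size_t)j*m] = 1.0f + 0.5f*I;

    plasma_init();                        // assumed entry point
    int status = plasma_chemm(PlasmaLeft, PlasmaLower, m, n,
                              alpha, A, m, B, m, beta, C, m);
    plasma_finalize();                    // assumed entry point

    free(A); free(B); free(C);
    return status;                        // PlasmaSuccess on success
}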
3d25pt_var.c
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h>   /* needed for omp_get_max_threads() below */
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Default problem size so the run is well defined when no command-line
   * arguments are given (the original left these uninitialized). */
  int Nx = 108, Ny = 108, Nz = 108, Nt = 100;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  /* Initialize from index 0 (the original started at 1), so the halo cells
   * read by the radius-4 stencil hold defined values. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 13; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
               coef[0][i][j][k] *  A[(t)%2][i  ][j  ][k  ]
             + coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ])
             + coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ])
             + coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1])
             + coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ])
             + coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ])
             + coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2])
             + coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ])
             + coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ])
             + coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3])
             + coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ])
             + coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ])
             + coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);  /* was min(), which is not defined */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  /* Release the top-level arrays as well (leaked in the original). */
  free(A);
  free(coef);
  free(tile_size);
  return 0;
}
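The benchmark hides one subtlety worth calling out: the grid is double-buffered, with (t+1)%2 selecting the destination plane and t%2 the source, so successive time steps alternate between the two allocations without any copy-back. Below is a minimal, self-contained sketch of that pattern on a 1D radius-1 stencil; the sizes and weights are made up for illustration and are not part of the benchmark.

/* Double-buffered time stepping on a 1D, radius-1 stencil: planes A[0] and
 * A[1] alternate as source and destination via t%2 and (t+1)%2. */
#include <stdio.h>
#include <stdlib.h>

#define N  16
#define NT 4

int main(void) {
    double A[2][N];
    int t, i;
    /* Initialize both planes so the untouched halo (i=0, i=N-1) is defined. */
    for (i = 0; i < N; i++)
        A[0][i] = A[1][i] = (double) (rand() % 1024);
    for (t = 0; t < NT; t++)
        for (i = 1; i < N - 1; i++)   /* skip the halo of width 1 */
            A[(t + 1) % 2][i] = 0.5 * A[t % 2][i]
                              + 0.25 * (A[t % 2][i - 1] + A[t % 2][i + 1]);
    /* After NT steps the most recently written plane is A[NT % 2]. */
    printf("A[%d][%d] = %f\n", NT % 2, N / 2, A[NT % 2][N / 2]);
    return 0;
}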
spmmd_x_csr_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> alphasparse_status_t ONAME(const ALPHA_SPMAT_CSR *matA, const ALPHA_SPMAT_CSR *matB, ALPHA_Number *matC, const ALPHA_INT ldc) { if (matA->cols != matB->rows || ldc < matA->rows) return ALPHA_SPARSE_STATUS_INVALID_VALUE; ALPHA_INT m = matA->rows; for(ALPHA_INT i = 0; i < matA->rows; i++) for(ALPHA_INT j = 0; j < matB->cols; j++) { alpha_setzero(matC[index2(j, i, ldc)]); } ALPHA_INT num_thread = alpha_get_thread_num(); ALPHA_INT64 *flop = (ALPHA_INT64 *)alpha_malloc(matB->cols * sizeof(ALPHA_INT64)); memset(flop, '\0', m * sizeof(ALPHA_INT64)); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT ar = 0; ar < m; ar++) { for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++) { ALPHA_INT br = matA->col_indx[ai]; flop[ar] += matB->rows_end[br] - matB->rows_start[br]; } } for (ALPHA_INT i = 1; i < m; i++) { flop[i] += flop[i - 1]; } ALPHA_INT partition[num_thread + 1]; balanced_partition_row_by_flop(flop, m, num_thread, partition); #ifdef _OPENMP #pragma omp parallel num_threads(num_thread) #endif { ALPHA_INT tid = alpha_get_thread_id(); ALPHA_INT local_m_s = partition[tid]; ALPHA_INT local_m_e = partition[tid + 1]; for (ALPHA_INT ar = local_m_s; ar < local_m_e; ar++) { for (ALPHA_INT ai = matA->rows_start[ar]; ai < matA->rows_end[ar]; ai++) { ALPHA_INT br = matA->col_indx[ai]; ALPHA_Number av = matA->values[ai]; for (ALPHA_INT bi = matB->rows_start[br]; bi < matB->rows_end[br]; bi++) { ALPHA_INT bc = matB->col_indx[bi]; ALPHA_Number bv = matB->values[bi]; alpha_madde(matC[index2(bc,ar,ldc)], av, bv); } } } } alpha_free(flop); return ALPHA_SPARSE_STATUS_SUCCESS; }
utils.h
#include <iostream> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <numeric> #include <vector> #include <unordered_map> #include <algorithm> #include <cassert> #include <utility> #include <omp.h> #include <string> #include <cstring> #include <ctime> #include <map> #include <atomic> #include <boost/sort/sort.hpp> double MEM_ALLOC_TIME = 0.0; unsigned int NUM_THREADS = 1; size_t SERIAL_TO_PAR_THRESH = NUM_THREADS*100; const unsigned locBuffSizeLarge = 2048; const unsigned locBuffSizeSmall = 256; template <typename T1, typename T2> class valCompareGreater { const std::vector<T2> &value_vector; public: valCompareGreater(const std::vector<T2> &val_vec): value_vector(val_vec) {} bool operator() (const T1 &i1, const T1 &i2) const { return value_vector[i1] > value_vector[i2]; } }; template <typename T1, typename T2> class valCompareLesser { const std::vector<T2> &value_vector; public: valCompareLesser(const std::vector<T2> &val_vec): value_vector(val_vec) {} bool operator() (const T1 &i1, const T1 &i2) const { return value_vector[i1] < value_vector[i2]; } }; template <typename T> void free_vec (std::vector<T> &vec) { std::vector<T> dummy; vec.swap(dummy); } template <typename T> void serial_init (std::vector<T> &vec, T val) { size_t numElems = vec.size(); for (size_t i=0; i<numElems; i++) vec[i] = val; } template <typename T> void parallel_init (std::vector<T> &vec, T val) { size_t numElems = vec.size(); #pragma omp parallel for for (size_t i=0; i<numElems; i++) vec[i] = val; } template <typename Tout, typename Tin> Tout parallel_reduce (std::vector<Tin> &in) { Tout retVal = 0; size_t vecSize = in.size(); #pragma omp parallel for reduction(+:retVal) for (size_t i=0; i<vecSize; i++) retVal += in[i]; return retVal; } //exclusive sum template <typename T1, typename T2> void serial_prefix_sum (std::vector<T1> &output, std::vector<T2> &input) { if (output.size() != (input.size() + 1)) output.resize(input.size() + 1); size_t start = 0; size_t end = input.size(); output[0] = 0; for (size_t i=start; i<end; i++) output[i+1] = output[i] + input[i]; } //inclusive sum template <typename T1, typename T2> void serial_prefix_sum_inclusive (std::vector<T1> &output, std::vector<T2> &input) { if (output.size() != (input.size())) output.resize(input.size()); size_t start = 0; size_t end = input.size(); output[0] = input[0]; for (size_t i=start+1; i<end; i++) output[i] = output[i-1] + input[i]; } //exclusive sum template <typename T1, typename T2> void parallel_prefix_sum (std::vector<T1> &output, std::vector<T2> &input) { if (input.size() < 50*NUM_THREADS) { serial_prefix_sum(output, input); return; } std::vector<T1> scan(input.size()+1); std::vector<T1> indAcc(NUM_THREADS+1, 0); size_t BS = (input.size()-1)/NUM_THREADS + 1; scan[0] = 0; #pragma omp parallel { size_t tid = omp_get_thread_num(); size_t start = tid*BS+1; size_t end = (tid+1)*BS+1; end = (end > input.size()+1) ?
input.size()+1 : end; if (start <= end) scan[start] = input[start-1]; for (size_t i=start+1; i<end; i++) scan[i] = scan[i-1] + input[i-1]; indAcc[tid+1] = scan[end-1]; #pragma omp barrier #pragma omp single { for (size_t i=1; i<NUM_THREADS+1; i++) indAcc[i] += indAcc[i-1]; } for (size_t i=start; i<end; i++) scan[i] += indAcc[tid]; } output.swap(scan); } //inclusive sum template <typename T1, typename T2> void parallel_prefix_sum_inclusive(std::vector<T1> &output, std::vector<T2> &input) { if (input.size() < 50*NUM_THREADS) { serial_prefix_sum_inclusive(output, input); return; } std::vector<T1> scan(input.size()); std::vector<T1> indAcc(NUM_THREADS+1, 0); size_t BS = (input.size()-1)/NUM_THREADS + 1; #pragma omp parallel { size_t tid = omp_get_thread_num(); size_t start = tid*BS; size_t end = std::min((tid+1)*BS, (size_t)input.size()); if (start <= end) scan[start] = input[start]; for (size_t i=start+1; i<end; i++) scan[i] = scan[i-1] + input[i]; indAcc[tid+1] = scan[end-1]; #pragma omp barrier #pragma omp single { for (size_t i=1; i<NUM_THREADS+1; i++) indAcc[i] += indAcc[i-1]; } for (size_t i=start; i<end; i++) scan[i] += indAcc[tid]; } output.swap(scan); } template <typename T> T parallel_find_max (std::vector<T> &vec) { T maxVal = vec[0]; size_t vec_size = vec.size(); #pragma omp parallel for reduction (max:maxVal) for (size_t i=0; i<vec_size; i++) maxVal = std::max(vec[i], maxVal); return maxVal; } template <typename T> T serial_find_max (std::vector<T> &vec) { T maxVal = vec[0]; size_t vec_size = vec.size(); for (size_t i=0; i<vec_size; i++) maxVal = std::max(vec[i], maxVal); return maxVal; } template <typename T1, typename T2> void serial_sort_kv (std::vector<T1> &idxVec, std::vector<T2> &valueVec) { std::stable_sort(idxVec.begin(), idxVec.end(), valCompareGreater<T1,T2>(valueVec)); } template <typename T1, typename T2> void serial_sort_kv_increasing (std::vector<T1> &idxVec, std::vector<T2> &valueVec) { std::stable_sort(idxVec.begin(), idxVec.end(), valCompareLesser<T1,T2>(valueVec)); } //sort keys based on the values. Note that this does not change the value array //The value array is indexed by the keys template <typename T1, typename T2> void parallel_sort_kv (std::vector<T1> &idxVec, std::vector<T2> &valueVec) { boost::sort::parallel_stable_sort(idxVec.begin(), idxVec.end(), valCompareGreater<T1,T2>(valueVec), NUM_THREADS); } //sort keys based on the values. 
Note that this does not change the value array //The value array is indexed by the keys template <typename T1, typename T2> void parallel_sort_kv_increasing (std::vector<T1> &idxVec, std::vector<T2> &valueVec) { boost::sort::parallel_stable_sort(idxVec.begin(), idxVec.end(), valCompareLesser<T1,T2>(valueVec), NUM_THREADS); } template <typename T1, class compare> void parallel_sort_indices (std::vector<T1> &idxVec, compare comp) { boost::sort::parallel_stable_sort(idxVec.begin(), idxVec.end(), comp, NUM_THREADS); } //unstable versions for better performance template <typename T1, typename T2> void parallel_unstable_sort_kv (std::vector<T1> &idxVec, std::vector<T2> &valueVec) { boost::sort::block_indirect_sort(idxVec.begin(), idxVec.end(), valCompareGreater<T1,T2>(valueVec), NUM_THREADS); } template <typename T1, typename T2> void parallel_unstable_sort_kv_increasing (std::vector<T1> &idxVec, std::vector<T2> &valueVec) { boost::sort::block_indirect_sort(idxVec.begin(), idxVec.end(), valCompareLesser<T1,T2>(valueVec), NUM_THREADS); } template <typename T1, class compare> void parallel_unstable_sort_indices (std::vector<T1> &idxVec, compare comp) { boost::sort::block_indirect_sort(idxVec.begin(), idxVec.end(), comp, NUM_THREADS); } template <typename T> void invertMap (std::vector<T> &ipMap, std::vector<T> &opMap) { opMap.resize(ipMap.size()); #pragma omp parallel for for (size_t i=0; i<ipMap.size(); i++) opMap[ipMap[i]] = i; } template <typename T1, typename T2> void reorderArr (std::vector<T1> &ipArr, std::vector<T2> &newOrder) { std::vector<T1> opArr (ipArr.size()); #pragma omp parallel for for (size_t i=0; i<ipArr.size(); i++) opArr[newOrder[i]] = ipArr[i]; opArr.swap(ipArr); } template <typename T1, typename T2> inline T1 choose2(T2 n) { if (n < 2) return 0; T1 retVal = (T1)n; retVal = retVal*(retVal-1); return retVal/2; } template <typename T> void print_list_horizontal(std::vector<T> &list) { std::cout<<"printing list : "; for (auto x:list) std::cout << x << ", "; std::cout<<std::endl; } template <typename T> void print_list_vertical(std::vector<T> &list) { std::cout<<"printing list : "; for (auto x:list) std::cout << x << std::endl; } //nET - data type for number of elements, for eg. intV for vertices template <typename T, typename nET> nET sequential_compact(std::vector<T> &in, std::vector<uint8_t> &keep, std::vector<T> &out) { nET numKeep = 0; for (nET i=0; i<in.size(); i++) numKeep += keep[i]; if (out.size() < numKeep) { out.clear(); out.resize(numKeep); } numKeep = 0; for (nET i=0; i<in.size(); i++) { if (keep[i]) out[numKeep++] = in[i]; } return numKeep; } //nET - data type for number of elements, for eg. 
intV for vertices template <typename T, typename nET> void parallel_compact(std::vector<T> &in, std::vector<uint8_t> &keep, std::vector<T> &out) { if (in.size() < NUM_THREADS*16) { nET numKeep = sequential_compact<T, nET>(in, keep, out); return; } std::vector<nET> activePerThread(NUM_THREADS, 0); std::vector<nET> offset; nET partSize = (in.size()-1)/NUM_THREADS + 1; #pragma omp parallel { size_t tid = omp_get_thread_num(); nET start = partSize*tid; nET end = std::min(start+partSize, (nET)in.size()); nET numActive = 0; for (nET i=start; i<end; i++) numActive += (keep[i] > 0); activePerThread[tid] = numActive; #pragma omp barrier #pragma omp single { serial_prefix_sum(offset, activePerThread); out.resize(offset[NUM_THREADS]); } #pragma omp barrier numActive = 0; for (nET i=start; i<end; i++) { if (keep[i]) { out[offset[tid]+numActive] = in[i]; numActive++; } } } } //nET - data type for number of elements, for eg. intV for vertices template <typename T, typename nET> void parallel_compact_in_place(std::vector<T> &in, std::vector<uint8_t> &keep) { std::vector<T> out; parallel_compact<T, nET>(in, keep, out); in.swap(out); } //overloaded function //construct two separate vectors for elements with keep[i]==true and keep[i]==false //nET - data type for number of elements, for eg. intV for vertices template <typename T, typename nET> void parallel_compact(std::vector<T> &in, std::vector<uint8_t> &keep, std::vector<T> &outTrue, std::vector<T> &outFalse) { std::vector<nET> activePerThread(NUM_THREADS, 0); std::vector<nET> inActivePerThread(NUM_THREADS, 0); std::vector<nET> offsetTrue; std::vector<nET> offsetFalse; nET partSize = (in.size()-1)/NUM_THREADS + 1; #pragma omp parallel { size_t tid = omp_get_thread_num(); nET start = partSize*tid; nET end = std::min(start+partSize, (nET)in.size()); nET range = (end>start) ? end-start : 0; nET numActive = 0; for (nET i=start; i<end; i++) numActive += (keep[i] > 0); activePerThread[tid] = numActive; inActivePerThread[tid] = range - numActive; #pragma omp barrier #pragma omp single { serial_prefix_sum(offsetTrue, activePerThread); serial_prefix_sum(offsetFalse, inActivePerThread); outTrue.resize(offsetTrue[NUM_THREADS]); outFalse.resize(offsetFalse[NUM_THREADS]); } #pragma omp barrier numActive = 0; nET numInActive = 0; for (nET i=start; i<end; i++) { if (keep[i]) { outTrue[offsetTrue[tid]+numActive] = in[i]; numActive++; } else { outFalse[offsetFalse[tid]+numInActive] = in[i]; numInActive++; } } } } //overloaded function //compact input vector and return a vector of deleted elements //nET - data type for number of elements, for eg. intV for vertices template <typename T, typename nET> void parallel_compact_in_place(std::vector<T> &in, std::vector<uint8_t> &keep, std::vector<T> &delElems) { std::vector<T> out; parallel_compact<T, nET>(in, keep, out, delElems); in.swap(out); } //nET - data type for number of elements, for eg. intV for vertices //use elements of "in" as indices/keys into "keep" vector template <typename T, typename nET> void serial_compact_kv(std::vector<T> &in, std::vector<uint8_t> &keep, std::vector<T> &out) { int numActive = 0; for (nET i=0; i<in.size(); i++) numActive += (keep[in[i]] > 0); out.resize(numActive); numActive = 0; for (nET i=0; i<in.size(); i++) { if (keep[in[i]]) out[numActive++] = in[i]; } } //nET - data type for number of elements, for eg. 
intV for vertices //use elements of "in" as indices/keys into "keep" vector template <typename T, typename nET> void parallel_compact_kv(std::vector<T> &in, std::vector<uint8_t> &keep, std::vector<T> &out) { std::vector<nET> activePerThread(NUM_THREADS, 0); std::vector<nET> offset; nET partSize = (in.size()-1)/NUM_THREADS + 1; #pragma omp parallel { size_t tid = omp_get_thread_num(); nET start = partSize*tid; nET end = std::min(start+partSize, (nET)in.size()); nET numActive = 0; for (nET i=start; i<end; i++) numActive += (keep[in[i]] > 0); activePerThread[tid] = numActive; #pragma omp barrier #pragma omp single { serial_prefix_sum(offset, activePerThread); out.resize(offset[NUM_THREADS]); } #pragma omp barrier numActive = 0; for (nET i=start; i<end; i++) { if (keep[in[i]]) { out[offset[tid]+numActive] = in[i]; numActive++; } } } } //Uses function pointer to test whether to keep or not //More generalizable template <typename T, typename fType> T parallel_compact_func(std::vector<T> &in, fType f, std::vector<T> &out, T currElems) { if (out.size() < currElems) out.resize(currElems); //if (out.size() < in.size()) out.resize(in.size()); T numElems = 0; if (in.size() < NUM_THREADS*16) { //for (size_t i=0; i<in.size(); i++) for (T i=0; i<currElems; i++) if (f(in[i])) out[numElems++] = in[i]; return numElems; } std::vector<T> activePerThread(NUM_THREADS, 0); std::vector<T> offset; //size_t partSize = (in.size()-1)/NUM_THREADS + 1; T partSize = (currElems-1)/NUM_THREADS+1; #pragma omp parallel { size_t tid = omp_get_thread_num(); T start = partSize*tid; T end = std::min(start+partSize, currElems); T numActive = 0; for (T i=start; i<end; i++) numActive += (f(in[i]) > 0); activePerThread[tid] = numActive; #pragma omp barrier #pragma omp single { serial_prefix_sum(offset, activePerThread); //out.resize(offset[NUM_THREADS]); } #pragma omp barrier numActive = 0; for (T i=start; i<end; i++) { if (f(in[i])) { out[offset[tid]+numActive] = in[i]; numActive++; } } } return offset[NUM_THREADS]; } template <typename T, typename fType> T parallel_compact_func_in_place(std::vector<T> &in, fType f, std::vector<T> &out, T currElems) { T numElems = parallel_compact_func(in, f, out, currElems); in.swap(out); return numElems; } //copy entire vector template <typename T> void parallel_vec_copy(std::vector<T> &op, std::vector<T> &in) { op.resize(in.size()); size_t numElems = in.size(); #pragma omp parallel for for (size_t i=0; i<numElems; i++) op[i] = in[i]; } //copy given elements from input vector to output vector template <typename T, typename eT> void parallel_vec_elems_copy(std::vector<T> &op, std::vector<T> &in, std::vector<eT> &elems) { eT numElems = elems.size(); #pragma omp parallel for for (eT i=0; i<numElems; i++) op[elems[i]] = in[elems[i]]; } //update global queue from thread's local queues template <typename buffType, typename ptrType> inline ptrType updateGlobalQueue (unsigned locQWrPtr, const unsigned locQSize, ptrType &globQWrPtr, std::array<buffType, locBuffSizeLarge> &locBuff, std::vector<buffType> &globBuff) { if (locQWrPtr >= locQSize) { ptrType tempIdx = __sync_fetch_and_add(&globQWrPtr, (ptrType)locQSize); for (ptrType bufIdx = 0; bufIdx < locQSize; bufIdx++) globBuff[tempIdx + bufIdx] = locBuff[bufIdx]; locQWrPtr = 0; } return locQWrPtr; }
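The trickiest routine in this header is the blocked two-phase scan in parallel_prefix_sum: each thread scans its own block, the per-block totals collected in indAcc are scanned serially, and a final sweep shifts every element by its block's offset. Below is a standalone sketch of the same algorithm in plain C with OpenMP; it assumes an OpenMP-enabled compiler, and the 64-thread cap on the totals array is an arbitrary choice made only for the sketch.

/* Two-phase blocked exclusive prefix sum, mirroring parallel_prefix_sum. */
#include <stdio.h>
#include <omp.h>

#define N 10

int main(void) {
    int in[N] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    int out[N + 1];            /* exclusive scan: out[0] = 0, out[N] = total */
    int acc[64 + 1] = {0};     /* per-block totals; sketch assumes <= 64 threads */
    out[0] = 0;
    #pragma omp parallel
    {
        int nt  = omp_get_num_threads();
        int tid = omp_get_thread_num();
        int bs  = (N - 1) / nt + 1;
        int lo  = tid * bs + 1, hi = (tid + 1) * bs + 1;
        if (hi > N + 1) hi = N + 1;
        /* Phase 1: each thread scans its own block. */
        if (lo < hi) {
            out[lo] = in[lo - 1];
            for (int i = lo + 1; i < hi; i++)
                out[i] = out[i - 1] + in[i - 1];
            acc[tid + 1] = out[hi - 1];
        }
        #pragma omp barrier
        /* Phase 2: serial scan of the block totals (implicit barrier after). */
        #pragma omp single
        for (int t = 1; t <= nt; t++)
            acc[t] += acc[t - 1];
        /* Phase 3: shift each block by its offset. */
        for (int i = lo; i < hi; i++)
            out[i] += acc[tid];
    }
    for (int i = 0; i <= N; i++)
        printf("%d ", out[i]);      /* 0 1 3 6 10 15 21 28 36 45 55 */
    printf("\n");
    return 0;
}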
GB_binop__eq_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_08__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_04__eq_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int8) // A*D function (colscale): GB (_AxD__eq_int8) // D*A function (rowscale): GB (_DxB__eq_int8) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int8) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int8) // C=scalar+B GB (_bind1st__eq_int8) // C=scalar+B' GB (_bind1st_tran__eq_int8) // C=A+scalar GB (_bind2nd__eq_int8) // C=A'+scalar GB (_bind2nd_tran__eq_int8) // C type: bool // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_INT8 || GxB_NO_EQ_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = 
(*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses 
GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
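Most of the file above is dispatch plumbing; the kernel shape is easiest to see in _bind1st__eq_int8, which binds the scalar x as the first operand of z = (x == y) and maps it over the held entries of B. Here is a plain-C sketch of that loop, with direct array accesses standing in for GraphBLAS's GBB/GBX macros and the OpenMP pragma of the real kernel dropped for brevity:

/* bind1st pattern: apply z = (x == y) with x fixed, skipping entries whose
 * bitmap bit is zero (what GBB tests in the generated kernel). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

int main(void) {
    int8_t x    = 3;
    int8_t Bx[] = {3, 1, 3, 7};
    int8_t Bb[] = {1, 1, 0, 1};   /* bitmap: entry 2 is not present */
    bool   Cx[4];
    for (int p = 0; p < 4; p++) {
        if (!Bb[p]) continue;      /* skip entries outside the pattern */
        Cx[p] = (x == Bx[p]);
    }
    for (int p = 0; p < 4; p++)
        if (Bb[p]) printf("Cx[%d] = %d\n", p, (int) Cx[p]);
    return 0;
}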
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriately. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoGammaImage(Image *image, ExceptionInfo *exception) { double gamma, log_mean, mean, sans; MagickStatusType status; ssize_t i; log_mean=log(0.5); if (image->channel_mask == DefaultChannels) { /* Apply gamma correction equally across all given channels. 
*/ (void) GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception)); } /* Auto-gamma each channel separately. */ status=MagickTrue; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ChannelType channel_mask; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i)); status=GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception); (void) SetImageChannelMask(image,channel_mask); if (status == MagickFalse) break; } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the AutoLevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image, ExceptionInfo *exception) { return(MinMaxStretchImage(image,0.0,0.0,1.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. It converts the brightness and contrast parameters into slope and % intercept and calls a polynomial function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast,ExceptionInfo *exception) { #define BrightnessContrastImageTag "BrightnessContrast/Image" double alpha, coefficients[2], intercept, slope; MagickBooleanType status; /* Compute slope and intercept.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImage(image,PolynomialFunction,2,coefficients,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C L A H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLAHEImage() is a variant of adaptive histogram equalization in which the % contrast amplification is limited, so as to reduce this problem of noise % amplification. % % Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in % "Graphics Gems IV", Academic Press, 1994. % % The format of the CLAHEImage method is: % % MagickBooleanType CLAHEImage(Image *image,const size_t width, % const size_t height,const size_t number_bins,const double clip_limit, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the tile divisions to use in horizontal direction. % % o height: the height of the tile divisions to use in vertical direction. % % o number_bins: number of bins for histogram ("dynamic range"). % % o clip_limit: contrast limit for localised changes in contrast. A limit % less than 1 results in standard non-contrast limited AHE. % % o exception: return any errors or warnings in this structure. % */ typedef struct _RangeInfo { unsigned short min, max; } RangeInfo; static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins, size_t *histogram) { #define NumberCLAHEGrays (65536) ssize_t i; size_t cumulative_excess, previous_excess, step; ssize_t excess; /* Compute total number of excess pixels. */ cumulative_excess=0; for (i=0; i < (ssize_t) number_bins; i++) { excess=(ssize_t) histogram[i]-(ssize_t) clip_limit; if (excess > 0) cumulative_excess+=excess; } /* Clip histogram and redistribute excess pixels across all bins. */ step=cumulative_excess/number_bins; excess=(ssize_t) (clip_limit-step); for (i=0; i < (ssize_t) number_bins; i++) { if ((double) histogram[i] > clip_limit) histogram[i]=(size_t) clip_limit; else if ((ssize_t) histogram[i] > excess) { cumulative_excess-=histogram[i]-excess; histogram[i]=(size_t) clip_limit; } else { cumulative_excess-=step; histogram[i]+=step; } } /* Redistribute remaining excess. */ do { size_t *p; size_t *q; previous_excess=cumulative_excess; p=histogram; q=histogram+number_bins; while ((cumulative_excess != 0) && (p < q)) { step=number_bins/cumulative_excess; if (step < 1) step=1; for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step) if ((double) *p < clip_limit) { (*p)++; cumulative_excess--; } p++; } } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess)); } static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const size_t number_bins, const unsigned short *lut,const unsigned short *pixels,size_t *histogram) { const unsigned short *p; ssize_t i; /* Classify the pixels into a gray histogram. 
*/ for (i=0; i < (ssize_t) number_bins; i++) histogram[i]=0L; p=pixels; for (i=0; i < (ssize_t) tile_info->height; i++) { const unsigned short *q; q=p+tile_info->width; while (p < q) histogram[lut[*p++]]++; q+=clahe_info->width; p=q-tile_info->width; } } static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12, const size_t *Q22,const size_t *Q11,const size_t *Q21, const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels) { ssize_t y; unsigned short intensity; /* Bilinear interpolate four tiles to eliminate boundary artifacts. */ for (y=(ssize_t) tile->height; y > 0; y--) { ssize_t x; for (x=(ssize_t) tile->width; x > 0; x--) { intensity=lut[*pixels]; *pixels++=(unsigned short) (PerceptibleReciprocal((double) tile->width* tile->height)*(y*((double) x*Q12[intensity]+(tile->width-x)* Q22[intensity])+(tile->height-y)*((double) x*Q11[intensity]+ (tile->width-x)*Q21[intensity]))); } pixels+=(clahe_info->width-tile->width); } } static void GenerateCLAHELut(const RangeInfo *range_info, const size_t number_bins,unsigned short *lut) { ssize_t i; unsigned short delta; /* Scale input image [intensity min,max] to [0,number_bins-1]. */ delta=(unsigned short) ((range_info->max-range_info->min)/number_bins+1); for (i=(ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++) lut[i]=(unsigned short) ((i-range_info->min)/delta); } static void MapCLAHEHistogram(const RangeInfo *range_info, const size_t number_bins,const size_t number_pixels,size_t *histogram) { double scale, sum; ssize_t i; /* Rescale histogram to range [min-intensity .. max-intensity]. */ scale=(double) (range_info->max-range_info->min)/number_pixels; sum=0.0; for (i=0; i < (ssize_t) number_bins; i++) { sum+=histogram[i]; histogram[i]=(size_t) (range_info->min+scale*sum); if (histogram[i] > range_info->max) histogram[i]=range_info->max; } } static MagickBooleanType CLAHE(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const RangeInfo *range_info, const size_t number_bins,const double clip_limit,unsigned short *pixels) { MemoryInfo *tile_cache; unsigned short *p; size_t limit, *tiles; ssize_t y; unsigned short *lut; /* Contrast limited adaptive histogram equalization. */ if (clip_limit == 1.0) return(MagickTrue); tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*number_bins, clahe_info->y*sizeof(*tiles)); if (tile_cache == (MemoryInfo *) NULL) return(MagickFalse); lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut)); if (lut == (unsigned short *) NULL) { tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickFalse); } tiles=(size_t *) GetVirtualMemoryBlob(tile_cache); limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins); if (limit < 1UL) limit=1UL; /* Generate greylevel mappings for each tile. */ GenerateCLAHELut(range_info,number_bins,lut); p=pixels; for (y=0; y < (ssize_t) clahe_info->y; y++) { ssize_t x; for (x=0; x < (ssize_t) clahe_info->x; x++) { size_t *histogram; histogram=tiles+(number_bins*(y*clahe_info->x+x)); GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram); ClipCLAHEHistogram((double) limit,number_bins,histogram); MapCLAHEHistogram(range_info,number_bins,tile_info->width* tile_info->height,histogram); p+=tile_info->width; } p+=clahe_info->width*(tile_info->height-1); } /* Interpolate greylevel mappings to get CLAHE image.
*/ p=pixels; for (y=0; y <= (ssize_t) clahe_info->y; y++) { OffsetInfo offset; RectangleInfo tile; ssize_t x; tile.height=tile_info->height; tile.y=y-1; offset.y=tile.y+1; if (y == 0) { /* Top row. */ tile.height=tile_info->height >> 1; tile.y=0; offset.y=0; } else if (y == (ssize_t) clahe_info->y) { /* Bottom row. */ tile.height=(tile_info->height+1) >> 1; tile.y=clahe_info->y-1; offset.y=tile.y; } for (x=0; x <= (ssize_t) clahe_info->x; x++) { tile.width=tile_info->width; tile.x=x-1; offset.x=tile.x+1; if (x == 0) { /* Left column. */ tile.width=tile_info->width >> 1; tile.x=0; offset.x=0; } else if (x == (ssize_t) clahe_info->x) { /* Right column. */ tile.width=(tile_info->width+1) >> 1; tile.x=clahe_info->x-1; offset.x=tile.x; } InterpolateCLAHE(clahe_info, tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */ tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */ tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */ tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */ &tile,lut,p); p+=tile.width; } p+=clahe_info->width*(tile.height-1); } lut=(unsigned short *) RelinquishMagickMemory(lut); tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickTrue); } MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width, const size_t height,const size_t number_bins,const double clip_limit, ExceptionInfo *exception) { #define CLAHEImageTag "CLAHE/Image" CacheView *image_view; ColorspaceType colorspace; MagickBooleanType status; MagickOffsetType progress; MemoryInfo *pixel_cache; RangeInfo range_info; RectangleInfo clahe_info, tile_info; size_t n; ssize_t y; unsigned short *pixels; /* Configure CLAHE parameters. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); range_info.min=0; range_info.max=NumberCLAHEGrays-1; tile_info.width=width; if (tile_info.width == 0) tile_info.width=image->columns >> 3; tile_info.height=height; if (tile_info.height == 0) tile_info.height=image->rows >> 3; tile_info.x=0; if ((image->columns % tile_info.width) != 0) tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width); tile_info.y=0; if ((image->rows % tile_info.height) != 0) tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height); clahe_info.width=image->columns+tile_info.x; clahe_info.height=image->rows+tile_info.y; clahe_info.x=(ssize_t) clahe_info.width/tile_info.width; clahe_info.y=(ssize_t) clahe_info.height/tile_info.height; pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height* sizeof(*pixels)); if (pixel_cache == (MemoryInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache); colorspace=image->colorspace; if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse) { pixel_cache=RelinquishVirtualMemory(pixel_cache); return(MagickFalse); } /* Initialize CLAHE pixels. 
*/ image_view=AcquireVirtualCacheView(image,exception); progress=0; status=MagickTrue; n=0; for (y=0; y < (ssize_t) clahe_info.height; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y- (tile_info.y >> 1),clahe_info.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) clahe_info.width; x++) { pixels[n++]=ScaleQuantumToShort(p[0]); p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ? (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels); if (status == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); /* Push CLAHE pixels to CLAHE image. */ image_view=AcquireAuthenticCacheView(image,exception); n=clahe_info.width*(tile_info.y >> 1); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } n+=tile_info.x >> 1; for (x=0; x < (ssize_t) image->columns; x++) { q[0]=ScaleShortToQuantum(pixels[n++]); q+=GetPixelChannels(image); } n+=(clahe_info.width-image->columns-(tile_info.x >> 1)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); pixel_cache=RelinquishVirtualMemory(pixel_cache); if (TransformImageColorspace(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertical gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will affect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used.
% % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo *clut_map; ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map)); if (clut_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireVirtualCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetPixelInfo(clut_image,clut_map+i); status=InterpolatePixelInfo(clut_image,clut_view,method, (double) i*(clut_image->columns-adjust)/MaxMap,(double) i* (clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { PixelTrait traits; GetPixelInfoPixel(image,q,&pixel); traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.red))].red; traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.green))].green; traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.blue))].blue; traits=GetPixelChannelTraits(image,BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.black))].black; traits=GetPixelChannelTraits(image,AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.alpha))].alpha; SetPixelViaPixelInfo(image,&pixel,q); 
q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map); if ((clut_image->alpha_trait != UndefinedPixelTrait) && ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slope, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); 
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap, blending each channel with the
        luma value exactly as the pixel loop below does.
      */
      double
        luma;

      luma=0.21267*image->colormap[i].red+0.71526*image->colormap[i].green+
        0.07217*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
      image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-
        luma);
      image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma);
    }
  /*
    Apply transfer function to image.
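    The SOP slope/offset/power values have already been folded into cdl_map[];
    the saturation factor blends each channel with a luma value computed from
    coefficients close to the Rec. 709 primaries.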
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.21267*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to MagickTrue to increase the
%  image contrast; otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become lighter.
*/ assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen,ExceptionInfo *exception) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; int sign; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; Contrast(sign,&red,&green,&blue); image->colormap[i].red=(MagickRealType) red; image->colormap[i].green=(MagickRealType) green; image->colormap[i].blue=(MagickRealType) blue; } } /* Contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blue, green, red; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); Contrast(sign,&red,&green,&blue); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by 'stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % 'enhancement' is less harsh. 
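%
%  For example, a plausible invocation that clips roughly 1% of the pixels at
%  each end of the histogram (both points are cumulative pixel counts measured
%  from the dark end, as the histogram searches below suggest; the variable
%  name is illustrative):
%
%      double pixels=(double) image->columns*image->rows;
%      (void) ContrastStretchImage(image,0.01*pixels,0.99*pixels,exception);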
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point, expressed as a pixel count in the range
%      0 to number-of-pixels.
%
%    o white_point: the white point, expressed as a pixel count in the range
%      0 to number-of-pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
*/ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; ssize_t j; black[i]=0.0; white[i]=MaxRange(QuantumRange); intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > black_point) break; } black[i]=(double) j; intensity=0.0; for (j=(ssize_t) MaxMap; j != 0; j--) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white[i]=(double) j; } histogram=(double *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*stretch_map)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t j; for (j=0; j <= (ssize_t) MaxMap; j++) { double gamma; gamma=PerceptibleReciprocal(white[i]-black[i]); if (j < (ssize_t) black[i]) stretch_map[GetPixelChannels(image)*j+i]=0.0; else if (j > (ssize_t) white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange; else if (black[i] != white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum( (double) (MaxMap*gamma*(j-black[i]))); } } if (image->storage_class == PseudoClass) { ssize_t j; /* Stretch-contrast colormap. */ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,RedPixelChannel); image->colormap[j].red=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,GreenPixelChannel); image->colormap[j].green=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,BluePixelChannel); image->colormap[j].blue=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,AlphaPixelChannel); image->colormap[j].alpha=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i]; } } } /* Stretch-contrast image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (black[j] == white[j]) continue; q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(double *) RelinquishMagickMemory(stretch_map); white=(double *) RelinquishMagickMemory(white); black=(double *) RelinquishMagickMemory(black); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
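%
%  A minimal usage sketch (the result name is illustrative):
%
%      Image
%        *denoised;
%
%      denoised=EnhanceImage(image,exception);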
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhanceImageTag "Enhance/Image" #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \ distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \ distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(image,r); \ aggregate.green+=(weight)*GetPixelGreen(image,r); \ aggregate.blue+=(weight)*GetPixelBlue(image,r); \ aggregate.black+=(weight)*GetPixelBlack(image,r); \ aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \ total_weight+=(weight); \ } \ r+=GetPixelChannels(image); CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); enhance_image=CloneImage(image,0,0,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse) { enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. 
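    For each pixel a 5x5 weighted window is scanned; only neighbors whose
    color distance from the center pixel falls under a fixed threshold
    contribute to the weighted mean that replaces the pixel.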
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2); GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; PixelInfo aggregate; const Quantum *magick_restrict r; GetPixelInfo(image,&aggregate); total_weight=0.0; GetPixelInfoPixel(image,p+center,&pixel); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*GetPixelChannels(image)*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*GetPixelChannels(image)*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { pixel.red=((aggregate.red+total_weight/2.0)/total_weight); pixel.green=((aggregate.green+total_weight/2.0)/total_weight); pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight); pixel.black=((aggregate.black+total_weight/2.0)/total_weight); pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight); } SetPixelViaPixelInfo(enhance_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(enhance_image); } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
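%
%  The transfer function is built from the cumulative histogram: for each
%  channel, a map value j is replaced by roughly
%
%      QuantumRange*(cdf(j)-cdf(0))/(cdf(MaxMap)-cdf(0))
%
%  where cdf() is the running sum of that channel's histogram, as the
%  integration step below shows.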
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        intensity=(double) p[i];
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ssize_t
      j;

    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        j;

      /*
        Equalize colormap.
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. */ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. 
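%
%  The correction applied to each value is, up to quantum rounding,
%
%      out=QuantumRange*pow(in/QuantumRange,1.0/gamma)
%
%  so a gamma greater than 1.0 brightens the mid-tones and a gamma less than
%  1.0 darkens them.  For example:
%
%      (void) GammaImage(image,2.2,exception);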
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const double gamma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gamma: the image gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,PerceptibleReciprocal(gamma))));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the image to grayscale.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the pixel intensity method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image.
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, red, intensity; red=(MagickRealType) GetPixelRed(image,q); green=(MagickRealType) GetPixelGreen(image,q); blue=(MagickRealType) GetPixelBlue(image,q); intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/3.0); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(image,ClampToQuantum(intensity),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace,exception)); return(SetImageColorspace(image,GRAYColorspace,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. 
You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image,ExceptionInfo *exception) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { double x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Hald clut image. */ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetPixelInfo(hald_image,&zero); hald_view=AcquireVirtualCacheView(hald_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double area, offset; HaldInfo point; PixelInfo pixel, pixel1, pixel2, pixel3, pixel4; point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q); offset=point.x+level*floor(point.y)+cube_size*floor(point.z); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); pixel1=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); if (status == MagickFalse) break; pixel2=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); if (status == MagickFalse) break; pixel3=zero; area=point.y; if (hald_image->interpolate == NearestInterpolatePixel) area=(point.y < 0.5) ? 
        0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel3);
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,
        exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        area,&pixel4);
      pixel=zero;
      area=point.z;
      if (hald_image->interpolate == NearestInterpolatePixel)
        area=(point.z < 0.5)? 0.0 : 1.0;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        area,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() adjusts the levels of a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  The white point specifies the lightest color
%  in the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
%    o exception: return any errors or warnings in this structure.
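%
%  Each channel value is mapped as
%
%      out=QuantumRange*pow((in-black_point)/(white_point-black_point),
%        1.0/gamma)
%
%  For example, to stretch the 10%-90% band of the quantum range to the full
%  range with no gamma change:
%
%      (void) LevelImage(image,0.10*QuantumRange,0.90*QuantumRange,1.0,
%        exception);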
% */ static inline double LevelPixel(const double black_point, const double white_point,const double gamma,const double pixel) { double level_pixel, scale; scale=PerceptibleReciprocal(white_point-black_point); level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point), PerceptibleReciprocal(gamma)); return(level_pixel); } MagickExport MagickBooleanType LevelImage(Image *image,const double black_point, const double white_point,const double gamma,ExceptionInfo *exception) { #define LevelImageTag "Level/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].red)); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].green)); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].blue)); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].alpha)); } /* Level image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma, (double) q[j])); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) ClampImage(image,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImage() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. Gamma is % applied before the values are mapped. 
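%
%  Each channel value is mapped as
%
%      out=pow(in/QuantumRange,gamma)*(white_point-black_point)+black_point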
%
%  LevelizeImage() can be invoked from the command line with the +level
%  option, or by adding a '!' flag to a -level or LevelImage() geometry
%  string.
%
%  It can be used to de-contrast a greyscale image to the exact levels
%  specified, or, by using specific levels for each channel, to convert a
%  greyscale image into any linear color gradient according to those levels.
%
%  The format of the LevelizeImage method is:
%
%      MagickBooleanType LevelizeImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
#define LevelizeValue(x)  ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColors() maps the given colors to "black" and "white" values,
%  linearly spreading out the colors, and level values, on a channel by
%  channel basis, as per LevelImage().  The given colors allow you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true, the image values will be modified in
%  the reverse direction.  That is, any existing "black" and "white" colors
%  in the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a greyscale gradient
%  into the given color gradient.
%
%  The format of the LevelImageColors method is:
%
%      MagickBooleanType LevelImageColors(Image *image,
%        const PixelInfo *black_color,const PixelInfo *white_color,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_color: The color to map black to/from.
%
%    o white_color: The color to map white to/from.
%
%    o invert: if MagickTrue, map the image colors to the given colors
%      (levelize) rather than from them (level).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; if (invert == MagickFalse) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } else { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelizeImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelizeImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelizeImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. 
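%
%  The black point and white point are given as cumulative pixel counts
%  measured from the dark and light ends of the histogram respectively, as
%  the two histogram searches below suggest.  For example, to clip the 2%
%  darkest and 1% lightest pixels (the variable name is illustrative):
%
%      double pixels=(double) image->columns*image->rows;
%      (void) LinearStretchImage(image,0.02*pixels,0.01*pixels,exception);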
%
%  The format of the LinearStretchImage method is:
%
%      MagickBooleanType LinearStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point levels.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o d u l a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModulateImage() lets you control the brightness, saturation, and hue
%  of an image.  Modulate represents the brightness, saturation, and hue
%  as one parameter (e.g. 90,150,100).  If the image colorspace is HSL, the
%  modulation is lightness, saturation, and hue.  For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use luma, chroma, and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and
%      hue.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
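    A percentage of 100 leaves a component unchanged; the hue term maps the
    0 to 200 percent range onto a rotation of roughly -180 to +180 degrees.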
*/ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,double *red, double *green,double *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,double *red, double *green,double *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,double *red, double *green,double *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,double *red, double *green,double *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,double *red, double *green,double *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate, ExceptionInfo *exception) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* Modulate image colormap. */ red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,exception) != MagickFalse) return(MagickTrue); #endif status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImage method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. */ if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; if (IsPixelGray(image,q) == MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 0.15 percent of all pixels to black and the brightest % 0.05 percent to white, per the black_point and white_point factors below. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NormalizeImage(Image *image, ExceptionInfo *exception) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImage(image,black_point,white_point,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast; otherwise the % contrast is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const double contrast, % const double midpoint,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. 
% */ /* ImageMagick 6 has a version of this function which uses LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const double contrast,const double midpoint, ExceptionInfo *exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Convenience macros. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Side effect: may clamp values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) { ssize_t i; if( sharpen != MagickFalse ) for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* Sigmoidal-contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if( sharpen != MagickFalse ) q[i]=ScaledSig(q[i]); else q[i]=InverseScaledSig(q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e B a l a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteBalanceImage() applies white balancing to an image according to a % grayworld assumption in the LAB colorspace. 
% % The format of the WhiteBalanceImage method is: % % MagickBooleanType WhiteBalanceImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WhiteBalanceImage(Image *image, ExceptionInfo *exception) { #define WhiteBalanceImageTag "WhiteBalance/Image" CacheView *image_view; const char *artifact; double a_mean, b_mean; MagickOffsetType progress; MagickStatusType status; ssize_t y; /* White balance image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=TransformImageColorspace(image,LabColorspace,exception); a_mean=0.0; b_mean=0.0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { a_mean+=QuantumScale*GetPixela(image,p)-0.5; b_mean+=QuantumScale*GetPixelb(image,p)-0.5; p+=GetPixelChannels(image); } } a_mean/=((double) image->columns*image->rows); b_mean/=((double) image->columns*image->rows); progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; /* Scale the chroma distance shifted according to amount of luminance. */ a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean; b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean; SetPixela(image,ClampToQuantum(a),q); SetPixelb(image,ClampToQuantum(b),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); artifact=GetImageArtifact(image,"white-balance:vibrance"); if (artifact != (const char *) NULL) { ChannelType channel_mask; double black_point; GeometryInfo geometry_info; MagickStatusType flags; /* Level the a & b channels. */ flags=ParseGeometry(artifact,&geometry_info); black_point=geometry_info.rho; if ((flags & PercentValue) != 0) black_point*=(double) (QuantumRange/100.0); channel_mask=SetImageChannelMask(image,(ChannelType) (aChannel | bChannel)); status&=LevelImage(image,black_point,(double) QuantumRange-black_point, 1.0,exception); (void) SetImageChannelMask(image,channel_mask); } status&=TransformImageColorspace(image,sRGBColorspace,exception); return(status != 0 ? MagickTrue : MagickFalse); }
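/*
  Editor's note: a minimal, self-contained sketch of the scaled sigmoidal
  transfer function documented in SigmoidalContrastImage() above, using the
  identity 1/(1+exp(-t)) = (1+tanh(t/2))/2.  The names sigmoidal,
  scaled_sigmoidal, and the demo main() are illustrative only and are not
  part of the MagickCore API.  Compile with: cc sigmoidal_sketch.c -lm
*/
#include <math.h>
#include <stdio.h>

/* Sigmoidal with inflexion point b and "slope constant" a (tanh form). */
static double sigmoidal(const double a,const double b,const double x)
{
  return(tanh((0.5*a)*(x-b)));
}

/* Affine-rescaled so that inputs 0.0 and 1.0 map exactly to 0.0 and 1.0. */
static double scaled_sigmoidal(const double a,const double b,const double x)
{
  double
    s0 = sigmoidal(a,b,0.0),
    s1 = sigmoidal(a,b,1.0);

  return((sigmoidal(a,b,x)-s0)/(s1-s0));
}

int main(void)
{
  double
    x;

  /* contrast=3.0 (typical), midpoint=0.5 (middle gray), inputs in [0,1] */
  for (x=0.0; x <= 1.0; x+=0.25)
    printf("%.2f -> %.4f\n",x,scaled_sigmoidal(3.0,0.5,x));
  return(0);
}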
GB_unop__atan_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__atan_fp32_fp32) // op(A') function: GB (_unop_tran__atan_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = atanf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atanf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = atanf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__atan_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = atanf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = atanf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__atan_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
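/*
    Editor's note: a standalone sketch of the two apply loops generated
    above (dense and bitmap), outside of GraphBLAS.  The name apply_atan
    and the demo main are illustrative only, not part of SuiteSparse.
    Compile with: cc atan_apply_sketch.c -lm
*/
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static void apply_atan (float *Cx, const float *Ax, const int8_t *Ab,
    int64_t anz)
{
    if (Ab == NULL)
    {
        // dense case: every entry is present
        for (int64_t p = 0 ; p < anz ; p++) Cx [p] = atanf (Ax [p]) ;
    }
    else
    {
        // bitmap case: Ab [p] nonzero marks a live entry
        for (int64_t p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = atanf (Ax [p]) ;
        }
    }
}

int main (void)
{
    float Ax [4] = { 0.0f, 0.5f, 1.0f, 2.0f }, Cx [4] = { 0 } ;
    int8_t Ab [4] = { 1, 0, 1, 1 } ;
    apply_atan (Cx, Ax, Ab, 4) ;
    for (int p = 0 ; p < 4 ; p++)
        printf ("Cx [%d] = %g (present: %d)\n", p, (double) Cx [p], Ab [p]) ;
    return (0) ;
}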
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables (start at 0 so the stencil halo is defined) // srand(42); for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { for (k = 0; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=0; i<Nz; i++) { for (j=0; j<Ny; j++) { for (k=0; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 24 && Multiplication: 13 per point #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + 
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } free(A); free(coef); free(tile_size); return 0; }
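/*
 * Editor's note: the benchmark above allocates A and coef as trees of row
 * pointers, which costs one malloc per row and scatters the data.  Below is
 * a sketch (illustrative only, not part of the original harness) of the
 * common alternative: one contiguous block per field with manual indexing,
 * which tends to improve locality and simplifies cleanup to a single free.
 */
#include <stdio.h>
#include <stdlib.h>

/* Flat index into a contiguous Nz x Ny x Nx block. */
#define IDX(i,j,k,Ny,Nx) (((size_t)(i)*(size_t)(Ny) + (size_t)(j))*(size_t)(Nx) + (size_t)(k))

static double *alloc3d(int Nz, int Ny, int Nx)
{
  return (double *) malloc(sizeof(double)*(size_t)Nz*(size_t)Ny*(size_t)Nx);
}

int main(void)
{
  int Nz = 8, Ny = 8, Nx = 8;
  double *A0 = alloc3d(Nz, Ny, Nx);
  if (A0 == NULL) return 1;
  A0[IDX(4,4,4,Ny,Nx)] = 3.14;               /* same cell as A0[4][4][4] */
  printf("A0[4][4][4] = %f\n", A0[IDX(4,4,4,Ny,Nx)]);
  free(A0);                                  /* one call frees the field */
  return 0;
}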
GB_unaryop__lnot_uint8_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint8_uint64 // op(A') function: GB_tran__lnot_uint8_uint64 // C type: uint8_t // A type: uint64_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint8_t z = (uint8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint8_uint64 ( uint8_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint8_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
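/*
    Editor's note: a hand expansion of the GB_CAST_OP macro above as a
    standalone demo (names are illustrative only, not part of SuiteSparse).
    Note that the cast to uint8_t happens before the logical not, so a
    value such as 256 truncates to 0 and then negates to 1.
*/
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    uint64_t Ax [3] = { 0, 7, 256 } ;
    uint8_t Cx [3] ;
    for (int p = 0 ; p < 3 ; p++)
    {
        uint8_t x = (uint8_t) Ax [p] ;  // GB_CASTING: truncate to 8 bits
        Cx [p] = !(x != 0) ;            // GB_OP: logical not
    }
    printf ("%d %d %d\n", Cx [0], Cx [1], Cx [2]) ;  // prints: 1 0 1
    return (0) ;
}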
test11.c
int g1 = 10; int g2 = 20; void foo () { 0; l1: #pragma omp barrier 1; } void bar() { 0; l2: #pragma omp barrier 1; } int main() { #pragma omp parallel { 2; // g1 = 20; if (3) { 4; foo (); 5; } else { 6; g2 = 10; l3: #pragma omp barrier 7; } foobar(g1); foobar(g2); if (8) { 9; bar(); 10; } else { 11; l4: #pragma omp barrier 12; } 13; } }
parallel_invoker.h
// Copyright 2019 The MediaPipe Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Parallel for loop execution. // For details adapt parallel_using_* flags defined in parallel_invoker.cc. // Usage example (for 1D): // Define Functor or lambda function that implements: // void operator()(const BlockedRange & range) const; // (in addition functor needs to be copyable). // Execute a for loop in parallel from 0 to N via: // ParallelFor(0, // start_index // num_frames, // end_index, exclusive // 1, // number of elements processed per iteration // [](const BlockedRange& range) { // // Process per-thread sub-range // for (int i = range.begin(); i < range.end(); ++i) { // // Process i'th item. // } // }); // Specific implementation to copy a vector of images in parallel. // class CopyInvoker { // public: // CopyInvoker(const vector<cv::Mat>& inputs, // vector<cv::Mat*>* outputs) // : inputs_(inputs), outputs_(outputs) { // } // CopyInvoker(const CopyInvoker& rhs) // : inputs_(rhs.inputs_), outputs_(rhs.outputs_) { // } // void operator()(const BlockedRange& range) { // for (int frame = range.begin(); frame < range.end(); ++frame) { // inputs_[frame].copyTo(*(*outputs_)[frame]); // } // } // private: // const vector<cv::Mat>& inputs_; // vector<cv::Mat*>* outputs_; // }; // vector<cv::Mat> inputs; // vector<cv::Mat*> outputs; // ParallelFor(0, num_frames, 1, CopyInvoker(inputs, &outputs)); // // OR (with lambdas): // ParallelFor(0, num_frames, 1, // [&inputs, &outputs](const BlockedRange& range) { // for (int frame = range.begin(); frame < range.end(); ++frame) { // inputs[frame].copyTo(*(outputs)[frame]); // } // }); #ifndef MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_ #define MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_ #include <stddef.h> #include <memory> #include "absl/synchronization/mutex.h" #include "mediapipe/framework/port/logging.h" #ifdef PARALLEL_INVOKER_ACTIVE #include "mediapipe/framework/port/threadpool.h" #ifdef __APPLE__ #include <dispatch/dispatch.h> #include <stdatomic.h> #endif #endif // PARALLEL_INVOKER_ACTIVE // Specifies parallelization implementation to use. enum PARALLEL_INVOKER_MODE { PARALLEL_INVOKER_NONE = 0, // Uses single threaded execution PARALLEL_INVOKER_THREAD_POOL = 1, // Uses //thread/threadpool PARALLEL_INVOKER_OPENMP = 2, // Uses OpenMP (requires compiler support) PARALLEL_INVOKER_GCD = 3, // Uses GCD (Apple) PARALLEL_INVOKER_MAX_VALUE = 4, // Increase when adding more modes }; extern int flags_parallel_invoker_mode; extern int flags_parallel_invoker_max_threads; // Note flag: Parallel processing only activated if // PARALLEL_INVOKER_ACTIVE is defined. namespace mediapipe { // Partitions the range [begin, end) into equal blocks of size grain_size each // (except last one, might be less than grain_size). 
class BlockedRange { public: BlockedRange(int begin, int end, int grain_size) : begin_(begin), end_(end), grain_size_(grain_size) {} int begin() const { return begin_; } int end() const { return end_; } int grain_size() const { return grain_size_; } private: int begin_; int end_; int grain_size_; }; // Partitions the range row_range x col_range into equal // blocks of size row_range.grain_size() x col_range.grain_size() each // (except last column and row might be of size less than grain_size in one // or both of their dimensions). class BlockedRange2D { public: BlockedRange2D(const BlockedRange& rows, const BlockedRange& cols) : rows_(rows), cols_(cols) {} const BlockedRange& rows() const { return rows_; } const BlockedRange& cols() const { return cols_; } private: BlockedRange rows_; BlockedRange cols_; }; #ifdef PARALLEL_INVOKER_ACTIVE // Singleton ThreadPool for parallel invoker. ThreadPool* ParallelInvokerThreadPool(); #ifdef __APPLE__ // Enable to allow GCD as an option beside ThreadPool. #define USE_PARALLEL_INVOKER_GCD 1 #define CHECK_GCD_PARALLEL_WORK_COUNT DEBUG template <class Invoker> class ParallelInvokerGCDContext { public: ParallelInvokerGCDContext(const Invoker& invoker, const BlockedRange& rows) : local_invoker_(invoker), rows_(rows) { #if CHECK_GCD_PARALLEL_WORK_COUNT count_ = 0; #endif } const Invoker& invoker() { #if CHECK_GCD_PARALLEL_WORK_COUNT // Implicitly tracking the # of launched tasks at invoker retrieval. atomic_fetch_add(&count_, 1); #endif return local_invoker_; } const BlockedRange& rows() const { return rows_; } #if CHECK_GCD_PARALLEL_WORK_COUNT const int count() { return atomic_load(&count_); } #endif private: Invoker local_invoker_; const BlockedRange& rows_; #if CHECK_GCD_PARALLEL_WORK_COUNT _Atomic(int32_t) count_; #endif }; template <class Invoker> class ParallelInvokerGCDContext2D : public ParallelInvokerGCDContext<Invoker> { public: ParallelInvokerGCDContext2D(const Invoker& invoker, const BlockedRange& rows, const BlockedRange& cols) : ParallelInvokerGCDContext<Invoker>(invoker, rows), cols_(cols) {} const BlockedRange& cols() const { return cols_; } private: BlockedRange cols_; }; template <class Invoker> static void ParallelForGCDTask(void* context, size_t index) { ParallelInvokerGCDContext<Invoker>* invoker_context = static_cast<ParallelInvokerGCDContext<Invoker>*>(context); const BlockedRange& all_tasks = invoker_context->rows(); int start = all_tasks.begin() + index * all_tasks.grain_size(); int end = std::min(all_tasks.end(), start + all_tasks.grain_size()); BlockedRange this_task(start, end, all_tasks.grain_size()); const Invoker& invoker = invoker_context->invoker(); invoker(this_task); } template <class Invoker> static void ParallelForGCDTask2D(void* context, size_t index) { ParallelInvokerGCDContext2D<Invoker>* invoker_context = static_cast<ParallelInvokerGCDContext2D<Invoker>*>(context); // Partitioning across rows. const BlockedRange& all_tasks = invoker_context->rows(); int start = all_tasks.begin() + index * all_tasks.grain_size(); int end = std::min(all_tasks.end(), start + all_tasks.grain_size()); BlockedRange this_task(start, end, all_tasks.grain_size()); const Invoker& invoker = invoker_context->invoker(); invoker(BlockedRange2D(this_task, invoker_context->cols())); } #endif // __APPLE__ #endif // PARALLEL_INVOKER_ACTIVE // Simple wrapper for compatibility with below ParallelFor function. 
template <class Invoker> void SerialFor(size_t start, size_t end, size_t grain_size, const Invoker& invoker) { invoker(BlockedRange(start, end, 1)); } inline void CheckAndSetInvokerOptions() { #if defined(PARALLEL_INVOKER_ACTIVE) #if defined(__ANDROID__) // If unsupported option is selected, force usage of OpenMP if detected, and // ThreadPool otherwise. if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE && flags_parallel_invoker_mode != PARALLEL_INVOKER_THREAD_POOL && flags_parallel_invoker_mode != PARALLEL_INVOKER_OPENMP) { #if defined(_OPENMP) LOG(WARNING) << "Unsupported invoker mode selected on Android. " << "OpenMP linkage detected, so falling back to OpenMP"; flags_parallel_invoker_mode = PARALLEL_INVOKER_OPENMP; #else // _OPENMP // Fallback mode for active parallel invoker without OpenMP is ThreadPool. LOG(WARNING) << "Unsupported invoker mode selected on Android. " << "Falling back to ThreadPool"; flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL; #endif // _OPENMP } #endif // __ANDROID__ #if defined(__APPLE__) || defined(__EMSCRIPTEN__) // Force usage of ThreadPool if unsupported option is selected. // (OpenMP is not supported on iOS, due to missing clang support). if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE && #if defined(USE_PARALLEL_INVOKER_GCD) flags_parallel_invoker_mode != PARALLEL_INVOKER_GCD && #endif // USE_PARALLEL_INVOKER_GCD flags_parallel_invoker_mode != PARALLEL_INVOKER_THREAD_POOL) { LOG(WARNING) << "Unsupported invoker mode selected on iOS. " << "Falling back to ThreadPool mode"; flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL; } #endif // __APPLE__ || __EMSCRIPTEN__ #if !defined(__APPLE__) && !defined(__EMSCRIPTEN__) && !defined(__ANDROID__) flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL; #endif // !__APPLE__ && !__EMSCRIPTEN__ && !__ANDROID__ // If OpenMP is requested, make sure we can actually use it, and fall back // to ThreadPool if not. if (flags_parallel_invoker_mode == PARALLEL_INVOKER_OPENMP) { #if !defined(_OPENMP) LOG(ERROR) << "OpenMP invoker mode selected but not compiling with OpenMP " << "enabled. Falling back to ThreadPool"; flags_parallel_invoker_mode = PARALLEL_INVOKER_THREAD_POOL; #endif // _OPENMP } #else // PARALLEL_INVOKER_ACTIVE if (flags_parallel_invoker_mode != PARALLEL_INVOKER_NONE) { LOG(ERROR) << "Parallel execution requested but PARALLEL_INVOKER_ACTIVE " << "compile flag is not set. Falling back to single threaded " << "execution."; flags_parallel_invoker_mode = PARALLEL_INVOKER_NONE; } #endif // PARALLEL_INVOKER_ACTIVE CHECK_LT(flags_parallel_invoker_mode, PARALLEL_INVOKER_MAX_VALUE) << "Invalid invoker mode specified."; CHECK_GE(flags_parallel_invoker_mode, 0) << "Invalid invoker mode specified."; } // Performs parallel iteration from [start to end), scheduling grain_size // iterations per thread. For each iteration // invoker(BlockedRange(thread_local_start, thread_local_end)) // is called. Each thread is given its local copy of invoker, i.e. // invoker needs to have copy constructor defined. template <class Invoker> void ParallelFor(size_t start, size_t end, size_t grain_size, const Invoker& invoker) { #ifdef PARALLEL_INVOKER_ACTIVE CheckAndSetInvokerOptions(); switch (flags_parallel_invoker_mode) { #if defined(__APPLE__) case PARALLEL_INVOKER_GCD: { int iterations_remain = (end - start + grain_size - 1) / grain_size; CHECK_GT(iterations_remain, 0); if (iterations_remain == 1) { // Execute invoker serially. 
invoker(BlockedRange(start, std::min(end, start + grain_size), 1)); } else { BlockedRange all_tasks(start, end, grain_size); ParallelInvokerGCDContext<Invoker> context(invoker, all_tasks); dispatch_queue_t concurrent_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); dispatch_apply_f(iterations_remain, concurrent_queue, &context, ParallelForGCDTask<Invoker>); #if CHECK_GCD_PARALLEL_WORK_COUNT CHECK_EQ(iterations_remain, context.count()); #endif } break; } #endif // __APPLE__ case PARALLEL_INVOKER_THREAD_POOL: { int iterations_remain = (end - start + grain_size - 1) / grain_size; CHECK_GT(iterations_remain, 0); if (iterations_remain == 1) { // Execute invoker serially. invoker(BlockedRange(start, std::min(end, start + grain_size), 1)); break; } struct { absl::Mutex mutex; absl::CondVar completed; int iterations_remain ABSL_GUARDED_BY(mutex); } loop; { absl::MutexLock lock(&loop.mutex); loop.iterations_remain = iterations_remain; } for (int x = start; x < end; x += grain_size) { auto loop_func = [x, end, grain_size, &loop, invoker]() { // Execute invoker. invoker(BlockedRange(x, std::min(end, x + grain_size), 1)); // Decrement counter. absl::MutexLock lock(&loop.mutex); --loop.iterations_remain; if (loop.iterations_remain == 0) { loop.completed.SignalAll(); } }; // Attempt to run in parallel, if busy run serial to avoid deadlocking. // This can happen during nested invocation of ParallelFor, as if the // loop iteration itself is calling ParallelFor we might deadlock if // we can not guarantee for the iteration to be scheduled. ParallelInvokerThreadPool()->Schedule(loop_func); } // Wait on termination of all iterations. loop.mutex.Lock(); while (loop.iterations_remain > 0) { loop.completed.Wait(&loop.mutex); } loop.mutex.Unlock(); break; } case PARALLEL_INVOKER_OPENMP: { // Use thread-local copy of invoker. Invoker local_invoker(invoker); #pragma omp parallel for firstprivate(local_invoker) \ num_threads(flags_parallel_invoker_max_threads) for (int x = start; x < end; ++x) { local_invoker(BlockedRange(x, x + 1, 1)); } break; } case PARALLEL_INVOKER_NONE: { SerialFor(start, end, grain_size, invoker); break; } case PARALLEL_INVOKER_MAX_VALUE: { LOG(FATAL) << "Impossible."; break; } } #else SerialFor(start, end, grain_size, invoker); #endif // PARALLEL_INVOKER_ACTIVE } // Simple wrapper for compatibility with below ParallelFor2D function. template <class Invoker> void SerialFor2D(size_t start_row, size_t end_row, size_t start_col, size_t end_col, size_t grain_size, const Invoker& invoker) { invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1), BlockedRange(start_col, end_col, 1))); } // Same as above ParallelFor for 2D iteration. template <class Invoker> void ParallelFor2D(size_t start_row, size_t end_row, size_t start_col, size_t end_col, size_t grain_size, const Invoker& invoker) { #ifdef PARALLEL_INVOKER_ACTIVE CheckAndSetInvokerOptions(); switch (flags_parallel_invoker_mode) { #if defined(__APPLE__) case PARALLEL_INVOKER_GCD: { const int iterations_remain = (end_row - start_row + grain_size - 1) / grain_size; CHECK_GT(iterations_remain, 0); if (iterations_remain == 1) { // Execute invoker serially. 
invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1), BlockedRange(start_col, end_col, 1))); } else { BlockedRange all_tasks(start_row, end_row, grain_size); ParallelInvokerGCDContext2D<Invoker> context( invoker, all_tasks, BlockedRange(start_col, end_col, grain_size)); dispatch_queue_t concurrent_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); dispatch_apply_f(iterations_remain, concurrent_queue, &context, ParallelForGCDTask2D<Invoker>); #if CHECK_GCD_PARALLEL_WORK_COUNT CHECK_EQ(iterations_remain, context.count()); #endif } break; } #endif // __APPLE__ case PARALLEL_INVOKER_THREAD_POOL: { int iterations_remain = end_row - start_row; // Guarded by loop_mutex CHECK_GT(iterations_remain, 0); if (iterations_remain == 1) { // Execute invoker serially. invoker(BlockedRange2D(BlockedRange(start_row, end_row, 1), BlockedRange(start_col, end_col, 1))); break; } absl::Mutex loop_mutex; absl::CondVar loop_completed; for (int y = start_row; y < end_row; ++y) { auto loop_func = [y, start_col, end_col, &loop_mutex, &loop_completed, &iterations_remain, invoker]() { // Execute invoker. invoker(BlockedRange2D(BlockedRange(y, y + 1, 1), BlockedRange(start_col, end_col, 1))); // Decrement counter. absl::MutexLock lock(&loop_mutex); --iterations_remain; if (iterations_remain == 0) { loop_completed.Signal(); } }; // Attempt to run in parallel, if busy run serial to avoid deadlocking. ParallelInvokerThreadPool()->Schedule(loop_func); } // Wait on termination of all iterations. loop_mutex.Lock(); while (iterations_remain > 0) { loop_completed.Wait(&loop_mutex); } loop_mutex.Unlock(); break; } case PARALLEL_INVOKER_OPENMP: { // Use thread-local copy of invoker. Invoker local_invoker(invoker); #pragma omp parallel for firstprivate(local_invoker) \ num_threads(flags_parallel_invoker_max_threads) for (int y = start_row; y < end_row; ++y) { local_invoker(BlockedRange2D(BlockedRange(y, y + 1, 1), BlockedRange(start_col, end_col, 1))); } break; } case PARALLEL_INVOKER_NONE: { SerialFor2D(start_row, end_row, start_col, end_col, grain_size, invoker); break; } case PARALLEL_INVOKER_MAX_VALUE: { LOG(FATAL) << "Impossible."; break; } } #else SerialFor2D(start_row, end_row, start_col, end_col, grain_size, invoker); #endif // PARALLEL_INVOKER_ACTIVE } } // namespace mediapipe #endif // MEDIAPIPE_UTIL_TRACKING_PARALLEL_INVOKER_H_
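// Editor's note: a compact usage sketch of the ParallelFor template above,
// following the commented-example convention of this header.  The function
// and buffer names (DoubleAll, values, kGrainSize) are illustrative only.
//
//   #include "mediapipe/util/tracking/parallel_invoker.h"
//   #include <vector>
//
//   void DoubleAll(std::vector<float>* values) {
//     constexpr size_t kGrainSize = 256;  // items per scheduled block
//     mediapipe::ParallelFor(
//         0, values->size(), kGrainSize,
//         [values](const mediapipe::BlockedRange& range) {
//           for (int i = range.begin(); i < range.end(); ++i) {
//             (*values)[i] *= 2.0f;
//           }
//         });
//   }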
blake2bp.c
/* BLAKE2 reference source code package - optimized C implementations Written in 2012 by Samuel Neves <sneves@dei.uc.pt> To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain worldwide. This software is distributed without any warranty. You should have received a copy of the CC0 Public Domain Dedication along with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdint.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 4 static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset ) { blake2b_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; P->node_offset = offset; P->node_depth = 0; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2b_init_param( S, P ); } static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen ) { blake2b_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; P->node_offset = 0; P->node_depth = 1; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2b_init_param( S, P ); } int blake2bp_init( blake2bp_state *S, const uint8_t outlen ) { if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2bp_init_root( S->R, outlen, 0 ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; return 0; } int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen ) { if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2bp_init_root( S->R, outlen, keylen ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES ); secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen ) { size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = inlen; const 
uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2B_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ) { blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = left + inlen; return 0; } int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2B_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES; if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES; blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left ); } blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES ); } for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES ); blake2b_final( S->R, out, outlen ); return 0; } int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; blake2b_state S[PARALLELISM_DEGREE][1]; blake2b_state FS[1]; /* Verify parameters */ if ( NULL == in ) return -1; if ( NULL == out ) return -1; if ( NULL == key ) keylen = 0; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node if( keylen > 0 ) { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES ); secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = omp_get_thread_num(); #endif uint64_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2B_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ) { blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } if( inlen__ > id__ * BLAKE2B_BLOCKBYTES ) { const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES; const size_t len = left <= BLAKE2B_BLOCKBYTES ? 
left : BLAKE2B_BLOCKBYTES; blake2b_update( S[id__], in__, len ); } blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES ); } if( blake2bp_init_root( FS, outlen, keylen ) < 0 ) return -1; FS->last_node = 1; // Mark as last node for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES ); blake2b_final( FS, out, outlen ); return 0; } #if defined(BLAKE2BP_SELFTEST) #include <string.h> #include "blake2-kat.h" int main( int argc, char **argv ) { uint8_t key[BLAKE2B_KEYBYTES]; uint8_t buf[KAT_LENGTH]; for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i ) key[i] = ( uint8_t )i; for( size_t i = 0; i < KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i; for( size_t i = 0; i < KAT_LENGTH; ++i ) { uint8_t hash[BLAKE2B_OUTBYTES]; //blake2bp( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES ); blake2bp_state S[1]; blake2bp_init_key( S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES ); blake2bp_update( S, buf, i ); blake2bp_final( S, hash, BLAKE2B_OUTBYTES ); if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) ) { puts( "error" ); return -1; } } puts( "ok" ); return 0; } #endif
GB_subassign_04.c
//------------------------------------------------------------------------------ // GB_subassign_04: C(I,J) += A ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 04: C(I,J) += A ; using S // M: NULL // Mask_comp: false // C_replace: false // accum: present // A: matrix // S: constructed // C: not bitmap: use GB_bitmap_assign instead // A: any sparsity structure. #include "GB_subassign_methods.h" GrB_Info GB_subassign_04 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_BinaryOp accum, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap GB_GET_A ; GB_GET_S ; GB_GET_ACCUM ; //-------------------------------------------------------------------------- // Method 04: C(I,J) += A ; using S //-------------------------------------------------------------------------- // Time: Close to Optimal. Every entry in A must be visited, and the // corresponding entry in S must then be found. Time for this phase is // Omega(nnz(A)), but S has already been constructed, in Omega(nnz(S)) // time. This method simply traverses all of A+S (like GB_add for // computing A+S), the same as Method 02. Time taken is O(nnz(A)+nnz(S)). // The only difference is that the traversal of A+S can terminate if A is // exhausted. Entries in S but not A do not actually require any work // (unlike Method 02, which must visit all entries in A+S). // Method 02 and Method 04 are somewhat similar. They differ on how C is // modified when the entry is present in S but not A. // TODO: phase2 of Method 02 and 04 are identical and could be // done in a single function. // Compare with Method 16, which computes C(I,J)<!M> += A, using S. 
//-------------------------------------------------------------------------- // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- if (A_is_bitmap) { // all of IxJ must be examined GB_SUBASSIGN_IXJ_SLICE ; } else { // traverse all A+S GB_SUBASSIGN_TWO_SLICE (A, S) ; } //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- if (A_is_bitmap) { //---------------------------------------------------------------------- // phase1: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (Sfound && !Afound) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) task_pending++ ; } else if (Sfound && Afound) { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; } } } GB_PHASE1_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase1: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE1 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { // ----[C . 1] or [X . 1]------------------------------- // S (i,j) is present but A (i,j) is not // [C . 1]: action: ( C ): no change, with accum // [X . 1]: action: ( X ): still a zombie GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) task_pending++ ; GB_NEXT (A) ; } else { // ----[C A 1] or [X A 1]------------------------------- // both S (i,j) and A (i,j) present // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_withaccum_C_A_1_matrix ; GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // List A (:,j) has entries. List S (:,j) exhausted. 
task_pending += (pA_end - pA) ; } GB_PHASE1_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; if (A_is_bitmap) { //---------------------------------------------------------------------- // phase2: A is bitmap //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t j = kfirst ; j <= klast ; j++) { //-------------------------------------------------------------- // get S(iA_start:iA_end,j) //-------------------------------------------------------------- GB_GET_VECTOR_FOR_IXJ (S, iA_start) ; int64_t pA_start = j * Avlen ; //-------------------------------------------------------------- // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t pA = pA_start + iA ; bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ; bool Afound = Ab [pA] ; if (!Sfound && Afound) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. 
A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else if (Sfound) { // S (i,j) present GB_NEXT (S) ; } } } GB_PHASE2_TASK_WRAPUP ; } } else { //---------------------------------------------------------------------- // phase2: A is hypersparse, sparse, or full //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //------------------------------------------------------------------ // get the task descriptor //------------------------------------------------------------------ GB_GET_TASK_DESCRIPTOR_PHASE2 ; //------------------------------------------------------------------ // compute all vectors in this task //------------------------------------------------------------------ for (int64_t k = kfirst ; k <= klast ; k++) { //-------------------------------------------------------------- // get A(:,j) and S(:,j) //-------------------------------------------------------------- int64_t j = GBH (Zh, k) ; GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen); GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen); //-------------------------------------------------------------- // do a 2-way merge of S(:,j) and A(:,j) //-------------------------------------------------------------- // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and A (:,j) have entries while (pS < pS_end && pA < pA_end) { int64_t iS = GBI (Si, pS, Svlen) ; int64_t iA = GBI (Ai, pA, Avlen) ; if (iS < iA) { GB_NEXT (S) ; } else if (iA < iS) { // ----[. A 1]------------------------------------------ // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } else { GB_NEXT (S) ; GB_NEXT (A) ; } } // ignore the remainder of S (:,j) // while list A (:,j) has entries. List S (:,j) exhausted. while (pA < pA_end) { // ----[. A 1]---------------------------------------------- // S (i,j) is not present, A (i,j) is present // [. A 1]: action: ( insert ) int64_t iA = GBI (Ai, pA, Avlen) ; int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT_aij ; GB_NEXT (A) ; } } GB_PHASE2_TASK_WRAPUP ; } } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
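/* Illustrative sketch (not part of GraphBLAS): the 2-way merge over the
   sorted index lists S(:,j) and A(:,j) that Method 04 performs above.
   Entries found only in A become pending tuples (inserted in phase 2);
   matches are updated in place via the accum operator; entries only in S
   need no work. */
#include <stdio.h>

static void merge_S_A (const int *Si, int ns, const int *Ai, int na)
{
    int ps = 0, pa = 0, pending = 0, updated = 0 ;
    while (ps < ns && pa < na)
    {
        if (Si [ps] < Ai [pa]) ps++ ;                       // in S only: no change
        else if (Ai [pa] < Si [ps]) { pending++ ; pa++ ; }  // in A only: insert later
        else { updated++ ; ps++ ; pa++ ; }                  // in both: C(i,j) += A(i,j)
    }
    pending += na - pa ;    // remainder of A is all insertions; rest of S is ignored
    printf ("updates %d, pending %d\n", updated, pending) ;
}

int main (void)
{
    const int Slist [3] = {1, 4, 7} ;
    const int Alist [4] = {2, 4, 9, 12} ;
    merge_S_A (Slist, 3, Alist, 4) ;    // prints: updates 1, pending 3
    return (0) ;
}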
jacobi-ompacc.c
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Add timing support
#include <sys/time.h>
double time_stamp()
{
  struct timeval t;
  double time;
  gettimeofday(&t,(struct timezone*)NULL);
  time = t.tv_sec + 1.0e-6*t.tv_usec;
  return time;
}
double time1, time2;

void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);

/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This C version program was translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
*         m - grid dimension in y direction
*         alpha - Helmholtz constant (always greater than 0.0)
*         tol - error tolerance for iterative solver
*         relax - Successive over-relaxation parameter
*         mits - Maximum iterations for iterative solver
*
* On output
*       : u(n,m) - Dependent variable (solutions)
*       : f(n,m) - Right hand side function
*************************************************************/

#define MSIZE 512
int n,m,mits;
#define REAL float // flexible between float and double
REAL tol,relax=1.0,alpha=0.0543;
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
REAL dx,dy;

int main (void)
{
  // float toler;
  /*
  printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
  scanf ("%d",&n);
  scanf ("%d",&m);
  printf("Input tol - error tolerance for iterative solver\n");
  scanf("%f",&toler);
  tol=(double)toler;
  printf("Input mits - Maximum iterations for solver\n");
  scanf("%d",&mits);
  */
  n=MSIZE;
  m=MSIZE;
  tol=0.0000000001;
  mits=5000;
#if 0 // Concurrent CPU and GPU threads not yet supported
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
#endif
  driver ( ) ;
  return 0;
}

/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
*     dx - grid spacing in x direction
*     dy - grid spacing in y direction
*************************************************************/
void driver( )
{
  initialize();
  time1 = time_stamp();
  /* Solve Helmholtz equation */
  jacobi ();
  time2 = time_stamp();
  printf("------------------------\n");
  printf("Execution time = %f\n",time2-time1);
  /* error_check (n,m,alpha,dx,dy,u,f)*/
  error_check ( );
}

/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize( )
{
  int i,j, xx,yy;
  //double PI=3.1415926;
  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  /* Initialize initial condition and RHS */
  //#pragma omp parallel for private(xx,yy,j,i)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      // note: xx and yy are ints, so these expressions truncate (kept from the original benchmark)
      xx =(int)( -1.0 + dx * (i-1));
      yy = (int)(-1.0 + dy * (j-1)) ;
      u[i][j] = 0.0;
      f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)
        - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
    }
}

/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves Poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m    Number of grid points in the X/Y directions
*         dx,dy  Grid spacing in the X/Y directions
*         alpha  Helmholtz eqn. coefficient
*         omega  Relaxation factor
*         f(n,m) Right hand side function
*         u(n,m) Dependent variable/Solution
*         tol    Tolerance for iterative solver
*         maxit  Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
void jacobi( )
{
  REAL omega;
  int i,j,k;
  REAL error,resid,ax,ay,b;
  // double error_local;
  // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
  // float te1,te2;
  // float second;
  omega=relax;
  /*
  * Initialize coefficients
  */
  ax = 1.0/(dx*dx); /* X-direction coef */
  ay = 1.0/(dy*dy); /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
  error = 10.0 * tol;
  k = 1;
  while ((k<=mits)&&(error>tol))
  {
    error = 0.0;
    /* Copy new solution into old */
    // Must split the omp for into two parallel for regions since the translation focuses on parallel to generate the outlined kernel
    // We need two CUDA kernels for implementing global synchronization so we have to have two omp parallel directives!!
//#pragma omp target map(in:n, m, omega, ax, ay, u[0:n][0:m],f[0:n][0:m]) map(alloc:uold[0:n][0:m]) //#pragma omp parallel // { #pragma omp target map(in:n, m, u[0:n][0:m]) map(out:uold[0:n][0:m]) #pragma omp parallel for private(j,i) for(i=0;i<n;i++) for(j=0;j<m;j++) uold[i][j] = u[i][j]; #pragma omp target map(in:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(out:u[0:n][0:m]) #pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait for (i=1;i<(n-1);i++) for (j=1;j<(m-1);j++) { resid = (ax*(uold[i-1][j] + uold[i+1][j])\ + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b; u[i][j] = uold[i][j] - omega * resid; error = error + resid*resid ; } // } /* omp end parallel */ /* Error check */ if (k%500==0) printf("Finished %d iteration with error =%f\n",k, error); error = sqrt(error)/(n*m); k = k + 1; } /* End iteration loop */ printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n", error); } /* subroutine error_check (n,m,alpha,dx,dy,u,f) implicit none ************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check ( ) { int i,j; REAL xx,yy,temp,error; dx = 2.0 / (n-1); dy = 2.0 / (m-1); error = 0.0 ; //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx = -1.0 + dx * (i-1); yy = -1.0 + dy * (j-1); temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy); error = error + temp*temp; } error = sqrt(error)/(n*m); printf("Solution Error :%E \n",error); }
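/* A condensed sketch (assumption-laden, not the file's exact directives) of
   the two-kernel Jacobi step above, written with the standard OpenMP 4.5
   map(to:)/map(from:)/map(tofrom:) spellings rather than the research
   map(in:)/map(out:) syntax used in this file.  N stands in for the runtime
   n == m == MSIZE; the boundary between the two target regions provides the
   global synchronization the comments above describe. */
#include <math.h>
#define N 512
static float U[N][N], UOLD[N][N], F[N][N];

float jacobi_step(float ax, float ay, float b, float omega)
{
  float error = 0.0f;
  /* kernel 1: snapshot u into uold */
  #pragma omp target teams distribute parallel for collapse(2) map(to: U) map(from: UOLD)
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      UOLD[i][j] = U[i][j];

  /* kernel 2: relax interior points and accumulate the squared residual */
  #pragma omp target teams distribute parallel for collapse(2) \
          map(to: UOLD, F) map(tofrom: U, error) reduction(+:error)
  for (int i = 1; i < N-1; i++)
    for (int j = 1; j < N-1; j++) {
      float resid = (ax*(UOLD[i-1][j] + UOLD[i+1][j])
                   + ay*(UOLD[i][j-1] + UOLD[i][j+1])
                   + b*UOLD[i][j] - F[i][j]) / b;
      U[i][j] = UOLD[i][j] - omega*resid;
      error += resid*resid;
    }
  return sqrtf(error)/((float)N*N);
}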
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-7,8)),ceild(4*t2-Nz-12,16));t3<=min(min(min(floord(4*t2+Ny,16),floord(Nt+Ny-4,16)),floord(2*t1+Ny+1,16)),floord(4*t1-4*t2+Nz+Ny-1,16));t3++) { for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(16*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(16*t3+Nx+12,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),16*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),16*t3+14),64*t4+62),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code 
*/ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
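/* Reference (untiled) form of the variable-coefficient 7-point update that
   the time-skewed CLooG nest above computes; useful for validating the tiled
   code.  Indexing matches the benchmark: timestep t reads A[t%2] and writes
   A[(t+1)%2] on the interior points. */
static void naive_3d7pt_var(double ****A, double ****coef,
                            int Nt, int Nz, int Ny, int Nx)
{
  for (int t = 0; t < Nt - 1; t++)
    for (int i = 1; i < Nz - 1; i++)
      for (int j = 1; j < Ny - 1; j++)
        for (int k = 1; k < Nx - 1; k++)
          A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[t%2][i  ][j  ][k  ]
            + coef[1][i][j][k] * A[t%2][i-1][j  ][k  ]
            + coef[2][i][j][k] * A[t%2][i  ][j-1][k  ]
            + coef[3][i][j][k] * A[t%2][i  ][j  ][k-1]
            + coef[4][i][j][k] * A[t%2][i+1][j  ][k  ]
            + coef[5][i][j][k] * A[t%2][i  ][j+1][k  ]
            + coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
}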
HybridRepSetReader.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2019 QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory ////////////////////////////////////////////////////////////////////////////////////// /** @file * * derived from SplineSetReader */ #ifndef QMCPLUSPLUS_HYBRIDREP_READER_H #define QMCPLUSPLUS_HYBRIDREP_READER_H #include <Numerics/Quadrature.h> #include <Numerics/Bessel.h> #include <QMCWaveFunctions/BsplineFactory/HybridRepCenterOrbitals.h> #include "OhmmsData/AttributeSet.h" //#include <QMCHamiltonians/Ylm.h> //#define PRINT_RADIAL namespace qmcplusplus { template<typename ST, typename LT> struct Gvectors { typedef TinyVector<ST, 3> PosType; typedef std::complex<ST> ValueType; const LT& Lattice; std::vector<PosType> gvecs_cart; //Cartesian. std::vector<ST> gmag; const size_t NumGvecs; Gvectors(const std::vector<TinyVector<int, 3>>& gvecs_in, const LT& Lattice_in, const TinyVector<int, 3>& HalfG, size_t first, size_t last) : Lattice(Lattice_in), NumGvecs(last - first) { gvecs_cart.resize(NumGvecs); gmag.resize(NumGvecs); #pragma omp parallel for for (size_t ig = 0; ig < NumGvecs; ig++) { TinyVector<ST, 3> gvec_shift; gvec_shift = gvecs_in[ig + first] + HalfG * 0.5; gvecs_cart[ig] = Lattice.k_cart(gvec_shift); gmag[ig] = std::sqrt(dot(gvecs_cart[ig], gvecs_cart[ig])); } } template<typename YLM_ENGINE, typename VVT> void calc_Ylm_G(const size_t ig, YLM_ENGINE& Ylm, VVT& YlmG) const { PosType Ghat(0.0, 0.0, 1.0); if (gmag[ig] > 0) Ghat = gvecs_cart[ig] / gmag[ig]; Ylm.evaluateV(Ghat[0], Ghat[1], Ghat[2], YlmG.data()); } template<typename VVT> inline void calc_jlm_G(const int lmax, ST& r, const size_t ig, VVT& j_lm_G) const { bessel_steed_array_cpu(lmax, gmag[ig] * r, j_lm_G.data()); for (size_t l = lmax; l > 0; l--) for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++) j_lm_G[lm] = j_lm_G[l]; } template<typename PT, typename VT> inline void calc_phase_shift(const PT& RSoA, const size_t ig, VT& phase_shift_real, VT& phase_shift_imag) const { const ST* restrict px = RSoA.data(0); const ST* restrict py = RSoA.data(1); const ST* restrict pz = RSoA.data(2); ST* restrict v_r = phase_shift_real.data(); ST* restrict v_i = phase_shift_imag.data(); const ST& gv_x = gvecs_cart[ig][0]; const ST& gv_y = gvecs_cart[ig][1]; const ST& gv_z = gvecs_cart[ig][2]; #pragma omp simd aligned(px, py, pz, v_r, v_i) for (size_t iat = 0; iat < RSoA.size(); iat++) sincos(px[iat] * gv_x + py[iat] * gv_y + pz[iat] * gv_z, v_i + iat, v_r + iat); } template<typename PT> ValueType evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos) { assert(cG.size() == NumGvecs); std::complex<ST> val(0.0, 0.0); for (size_t ig = 0; ig < NumGvecs; ig++) { ST s, c; sincos(dot(gvecs_cart[ig], pos), &s, &c); ValueType pw0(c, s); val += cG[ig] * pw0; } return val; } template<typename PT> void evaluate_psi_r(const Vector<std::complex<double>>& cG, const PT& pos, ValueType& phi, ValueType& d2phi) { assert(cG.size() == NumGvecs); d2phi = phi = 0.0; for (size_t ig = 0; ig < NumGvecs; ig++) { ST s, c; sincos(dot(gvecs_cart[ig], pos), &s, &c); ValueType pw0(c, s); phi += cG[ig] * pw0; d2phi += cG[ig] * pw0 * (-dot(gvecs_cart[ig], gvecs_cart[ig])); } } double evaluate_KE(const Vector<std::complex<double>>& cG) { 
assert(cG.size() == NumGvecs); double KE = 0; for (size_t ig = 0; ig < NumGvecs; ig++) KE += dot(gvecs_cart[ig], gvecs_cart[ig]) * (cG[ig].real() * cG[ig].real() + cG[ig].imag() * cG[ig].imag()); return KE / 2.0; } }; /** General HybridRepSetReader to handle any unitcell */ template<typename SA> struct HybridRepSetReader : public SplineSetReader<SA> { typedef SplineSetReader<SA> BaseReader; using BaseReader::bspline; using BaseReader::mybuilder; using BaseReader::rotate_phase_i; using BaseReader::rotate_phase_r; using typename BaseReader::DataType; HybridRepSetReader(EinsplineSetBuilder* e) : BaseReader(e) {} /** initialize basic parameters of atomic orbitals */ void initialize_hybridrep_atomic_centers() override { OhmmsAttributeSet a; std::string scheme_name("Consistent"); std::string s_function_name("LEKS2018"); a.add(scheme_name, "smoothing_scheme"); a.add(s_function_name, "smoothing_function"); a.put(mybuilder->XMLRoot); // assign smooth_scheme if (scheme_name == "Consistent") bspline->smooth_scheme = SA::smoothing_schemes::CONSISTENT; else if (scheme_name == "SmoothAll") bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHALL; else if (scheme_name == "SmoothPartial") bspline->smooth_scheme = SA::smoothing_schemes::SMOOTHPARTIAL; else APP_ABORT("initialize_hybridrep_atomic_centers wrong smoothing_scheme name! Only allows Consistent, SmoothAll or " "SmoothPartial."); // assign smooth_function if (s_function_name == "LEKS2018") bspline->smooth_func_id = smoothing_functions::LEKS2018; else if (s_function_name == "coscos") bspline->smooth_func_id = smoothing_functions::COSCOS; else if (s_function_name == "linear") bspline->smooth_func_id = smoothing_functions::LINEAR; else APP_ABORT( "initialize_hybridrep_atomic_centers wrong smoothing_function name! Only allows LEKS2018, coscos or linear."); app_log() << "Hybrid orbital representation uses " << scheme_name << " smoothing scheme and " << s_function_name << " smoothing function." 
<< std::endl; bspline->set_info(*(mybuilder->SourcePtcl), mybuilder->TargetPtcl, mybuilder->Super2Prim); auto& centers = bspline->AtomicCenters; auto& ACInfo = mybuilder->AtomicCentersInfo; // load atomic center info only when it is not initialized if (centers.size() == 0) { bool success = true; app_log() << "Reading atomic center info for hybrid representation" << std::endl; for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++) { const int my_GroupID = ACInfo.GroupID[center_idx]; if (ACInfo.cutoff[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'cutoff_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.inner_cutoff[center_idx] < 0) { const double inner_cutoff = std::max(ACInfo.cutoff[center_idx] - 0.3, 0.0); app_log() << "Hybrid orbital representation setting 'inner_cutoff' to " << inner_cutoff << " for group " << my_GroupID << " as atom " << center_idx << std::endl; // overwrite the inner_cutoff of all the atoms of the same species for (int id = 0; id < ACInfo.Ncenters; id++) if (my_GroupID == ACInfo.GroupID[id]) ACInfo.inner_cutoff[id] = inner_cutoff; } else if (ACInfo.inner_cutoff[center_idx] > ACInfo.cutoff[center_idx]) { app_error() << "Hybrid orbital representation 'inner_cutoff' must be smaller than 'spline_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.cutoff[center_idx] > 0) { if (ACInfo.lmax[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'lmax' for atom " << center_idx << std::endl; success = false; } if (ACInfo.spline_radius[center_idx] < 0 && ACInfo.spline_npoints[center_idx] < 0) { app_log() << "Parameters 'spline_radius' and 'spline_npoints' for group " << my_GroupID << " as atom " << center_idx << " are not specified." 
<< std::endl; const double delta = std::min(0.02, ACInfo.cutoff[center_idx] / 4.0); const int n_grid_point = std::ceil((ACInfo.cutoff[center_idx] + 1e-4) / delta) + 3; for (int id = 0; id < ACInfo.Ncenters; id++) if (my_GroupID == ACInfo.GroupID[id]) { ACInfo.spline_npoints[id] = n_grid_point; ACInfo.spline_radius[id] = (n_grid_point - 1) * delta; } app_log() << " Based on default grid point distance " << delta << std::endl; app_log() << " Setting 'spline_npoints' to " << ACInfo.spline_npoints[center_idx] << std::endl; app_log() << " Setting 'spline_radius' to " << ACInfo.spline_radius[center_idx] << std::endl; } else { if (ACInfo.spline_radius[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'spline_radius' for atom " << center_idx << std::endl; success = false; } if (ACInfo.spline_npoints[center_idx] < 0) { app_error() << "Hybrid orbital representation needs parameter 'spline_npoints' for atom " << center_idx << std::endl; success = false; } } // check maximally allowed cutoff_radius double max_allowed_cutoff = ACInfo.spline_radius[center_idx] - 2.0 * ACInfo.spline_radius[center_idx] / (ACInfo.spline_npoints[center_idx] - 1); if (success && ACInfo.cutoff[center_idx] > max_allowed_cutoff) { app_error() << "Hybrid orbital representation requires cutoff_radius<=" << max_allowed_cutoff << " calculated by spline_radius-2*spline_radius/(spline_npoints-1) for atom " << center_idx << std::endl; success = false; } } else { // no atomic regions for this atom type ACInfo.spline_radius[center_idx] = 0.0; ACInfo.spline_npoints[center_idx] = 0; ACInfo.lmax[center_idx] = 0; } } if (!success) BaseReader::myComm->barrier_and_abort("initialize_hybridrep_atomic_centers Failed to initialize atomic centers " "in hybrid orbital representation!"); for (int center_idx = 0; center_idx < ACInfo.Ncenters; center_idx++) { AtomicOrbitals<DataType> oneCenter(ACInfo.lmax[center_idx]); oneCenter.set_info(ACInfo.ion_pos[center_idx], ACInfo.cutoff[center_idx], ACInfo.inner_cutoff[center_idx], ACInfo.spline_radius[center_idx], ACInfo.non_overlapping_radius[center_idx], ACInfo.spline_npoints[center_idx]); centers.push_back(oneCenter); } } } /** initialize construct atomic orbital radial functions from plane waves */ inline void create_atomic_centers_Gspace(Vector<std::complex<double>>& cG, Communicate& band_group_comm, int iorb) override { band_group_comm.bcast(rotate_phase_r); band_group_comm.bcast(rotate_phase_i); band_group_comm.bcast(cG); //distribute G-vectors over processor groups const int Ngvecs = mybuilder->Gvecs[0].size(); const int Nprocs = band_group_comm.size(); const int Ngvecgroups = std::min(Ngvecs, Nprocs); Communicate gvec_group_comm(band_group_comm, Ngvecgroups); std::vector<int> gvec_groups(Ngvecgroups + 1, 0); FairDivideLow(Ngvecs, Ngvecgroups, gvec_groups); const int gvec_first = gvec_groups[gvec_group_comm.getGroupID()]; const int gvec_last = gvec_groups[gvec_group_comm.getGroupID() + 1]; // prepare Gvecs Ylm(G) typedef typename EinsplineSetBuilder::UnitCellType UnitCellType; Gvectors<double, UnitCellType> Gvecs(mybuilder->Gvecs[0], mybuilder->PrimCell, bspline->HalfG, gvec_first, gvec_last); // if(band_group_comm.isGroupLeader()) std::cout << "print band=" << iorb << " KE=" << Gvecs.evaluate_KE(cG) << std::endl; std::vector<AtomicOrbitals<DataType>>& centers = bspline->AtomicCenters; app_log() << "Transforming band " << iorb << " on Rank 0" << std::endl; // collect atomic centers by group std::vector<int> uniq_species; for (int center_idx = 0; center_idx < 
centers.size(); center_idx++) { auto& ACInfo = mybuilder->AtomicCentersInfo; const int my_GroupID = ACInfo.GroupID[center_idx]; int found_idx = -1; for (size_t idx = 0; idx < uniq_species.size(); idx++) if (my_GroupID == uniq_species[idx]) { found_idx = idx; break; } if (found_idx < 0) uniq_species.push_back(my_GroupID); } // construct group list std::vector<std::vector<int>> group_list(uniq_species.size()); for (int center_idx = 0; center_idx < centers.size(); center_idx++) { auto& ACInfo = mybuilder->AtomicCentersInfo; const int my_GroupID = ACInfo.GroupID[center_idx]; for (size_t idx = 0; idx < uniq_species.size(); idx++) if (my_GroupID == uniq_species[idx]) { group_list[idx].push_back(center_idx); break; } } for (int group_idx = 0; group_idx < group_list.size(); group_idx++) { const auto& mygroup = group_list[group_idx]; const double spline_radius = centers[mygroup[0]].getSplineRadius(); const int spline_npoints = centers[mygroup[0]].getSplineNpoints(); const int lmax = centers[mygroup[0]].getLmax(); const double delta = spline_radius / static_cast<double>(spline_npoints - 1); const int lm_tot = (lmax + 1) * (lmax + 1); const size_t natoms = mygroup.size(); const int policy = lm_tot > natoms ? 0 : 1; std::vector<std::complex<double>> i_power(lm_tot); // rotate phase is introduced here. std::complex<double> i_temp(rotate_phase_r, rotate_phase_i); for (size_t l = 0; l <= lmax; l++) { for (size_t lm = l * l; lm < (l + 1) * (l + 1); lm++) i_power[lm] = i_temp; i_temp *= std::complex<double>(0.0, 1.0); } std::vector<Matrix<double>> all_vals(natoms); std::vector<std::vector<aligned_vector<double>>> vals_local(spline_npoints * omp_get_max_threads()); VectorSoaContainer<double, 3> myRSoA(natoms); for (size_t idx = 0; idx < natoms; idx++) { all_vals[idx].resize(spline_npoints, lm_tot * 2); all_vals[idx] = 0.0; myRSoA(idx) = centers[mygroup[idx]].getCenterPos(); } #pragma omp parallel { const size_t tid = omp_get_thread_num(); const size_t nt = omp_get_num_threads(); for (int ip = 0; ip < spline_npoints; ip++) { const size_t ip_idx = tid * spline_npoints + ip; if (policy == 1) { vals_local[ip_idx].resize(lm_tot * 2); for (size_t lm = 0; lm < lm_tot * 2; lm++) { auto& vals = vals_local[ip_idx][lm]; vals.resize(natoms); std::fill(vals.begin(), vals.end(), 0.0); } } else { vals_local[ip_idx].resize(natoms * 2); for (size_t iat = 0; iat < natoms * 2; iat++) { auto& vals = vals_local[ip_idx][iat]; vals.resize(lm_tot); std::fill(vals.begin(), vals.end(), 0.0); } } } const size_t size_pw_tile = 32; const size_t num_pw_tiles = (Gvecs.NumGvecs + size_pw_tile - 1) / size_pw_tile; aligned_vector<double> j_lm_G(lm_tot, 0.0); std::vector<aligned_vector<double>> phase_shift_r(size_pw_tile); std::vector<aligned_vector<double>> phase_shift_i(size_pw_tile); std::vector<aligned_vector<double>> YlmG(size_pw_tile); for (size_t ig = 0; ig < size_pw_tile; ig++) { phase_shift_r[ig].resize(natoms); phase_shift_i[ig].resize(natoms); YlmG[ig].resize(lm_tot); } SoaSphericalTensor<double> Ylm(lmax); #pragma omp for for (size_t tile_id = 0; tile_id < num_pw_tiles; tile_id++) { const size_t ig_first = tile_id * size_pw_tile; const size_t ig_last = std::min((tile_id + 1) * size_pw_tile, Gvecs.NumGvecs); for (size_t ig = ig_first; ig < ig_last; ig++) { const size_t ig_local = ig - ig_first; // calculate phase shift for all the centers of this group Gvecs.calc_phase_shift(myRSoA, ig, phase_shift_r[ig_local], phase_shift_i[ig_local]); Gvecs.calc_Ylm_G(ig, Ylm, YlmG[ig_local]); } for (int ip = 0; ip < spline_npoints; ip++) { 
double r = delta * static_cast<double>(ip); const size_t ip_idx = tid * spline_npoints + ip; for (size_t ig = ig_first; ig < ig_last; ig++) { const size_t ig_local = ig - ig_first; // calculate spherical bessel function Gvecs.calc_jlm_G(lmax, r, ig, j_lm_G); for (size_t lm = 0; lm < lm_tot; lm++) j_lm_G[lm] *= YlmG[ig_local][lm]; const double cG_r = cG[ig + gvec_first].real(); const double cG_i = cG[ig + gvec_first].imag(); if (policy == 1) { for (size_t lm = 0; lm < lm_tot; lm++) { double* restrict vals_r = vals_local[ip_idx][lm * 2].data(); double* restrict vals_i = vals_local[ip_idx][lm * 2 + 1].data(); const double* restrict ps_r_ptr = phase_shift_r[ig_local].data(); const double* restrict ps_i_ptr = phase_shift_i[ig_local].data(); double cG_j_r = cG_r * j_lm_G[lm]; double cG_j_i = cG_i * j_lm_G[lm]; #pragma omp simd aligned(vals_r, vals_i, ps_r_ptr, ps_i_ptr) for (size_t idx = 0; idx < natoms; idx++) { const double ps_r = ps_r_ptr[idx]; const double ps_i = ps_i_ptr[idx]; vals_r[idx] += cG_j_r * ps_r - cG_j_i * ps_i; vals_i[idx] += cG_j_i * ps_r + cG_j_r * ps_i; } } } else { for (size_t idx = 0; idx < natoms; idx++) { double* restrict vals_r = vals_local[ip_idx][idx * 2].data(); double* restrict vals_i = vals_local[ip_idx][idx * 2 + 1].data(); const double* restrict j_lm_G_ptr = j_lm_G.data(); double cG_ps_r = cG_r * phase_shift_r[ig_local][idx] - cG_i * phase_shift_i[ig_local][idx]; double cG_ps_i = cG_i * phase_shift_r[ig_local][idx] + cG_r * phase_shift_i[ig_local][idx]; #pragma omp simd aligned(vals_r, vals_i, j_lm_G_ptr) for (size_t lm = 0; lm < lm_tot; lm++) { const double jlm = j_lm_G_ptr[lm]; vals_r[lm] += cG_ps_r * jlm; vals_i[lm] += cG_ps_i * jlm; } } } } } } #pragma omp for collapse(2) for (int ip = 0; ip < spline_npoints; ip++) for (size_t idx = 0; idx < natoms; idx++) { double* vals = all_vals[idx][ip]; for (size_t tid = 0; tid < nt; tid++) for (size_t lm = 0; lm < lm_tot; lm++) { double vals_th_r, vals_th_i; const size_t ip_idx = tid * spline_npoints + ip; if (policy == 1) { vals_th_r = vals_local[ip_idx][lm * 2][idx]; vals_th_i = vals_local[ip_idx][lm * 2 + 1][idx]; } else { vals_th_r = vals_local[ip_idx][idx * 2][lm]; vals_th_i = vals_local[ip_idx][idx * 2 + 1][lm]; } const double real_tmp = 4.0 * M_PI * i_power[lm].real(); const double imag_tmp = 4.0 * M_PI * i_power[lm].imag(); vals[lm] += vals_th_r * real_tmp - vals_th_i * imag_tmp; vals[lm + lm_tot] += vals_th_i * real_tmp + vals_th_r * imag_tmp; } } } //app_log() << "Building band " << iorb << " at center " << center_idx << std::endl; for (size_t idx = 0; idx < natoms; idx++) { // reduce all_vals band_group_comm.reduce_in_place(all_vals[idx].data(), all_vals[idx].size()); if (!band_group_comm.isGroupLeader()) continue; #pragma omp parallel for for (int lm = 0; lm < lm_tot; lm++) { auto& mycenter = centers[mygroup[idx]]; aligned_vector<double> splineData_r(spline_npoints); UBspline_1d_d* atomic_spline_r; for (size_t ip = 0; ip < spline_npoints; ip++) splineData_r[ip] = all_vals[idx][ip][lm]; atomic_spline_r = einspline::create(atomic_spline_r, 0.0, spline_radius, spline_npoints, splineData_r.data(), ((lm == 0) || (lm > 3))); if (!bspline->is_complex) { mycenter.set_spline(atomic_spline_r, lm, iorb); einspline::destroy(atomic_spline_r); } else { aligned_vector<double> splineData_i(spline_npoints); UBspline_1d_d* atomic_spline_i; for (size_t ip = 0; ip < spline_npoints; ip++) splineData_i[ip] = all_vals[idx][ip][lm + lm_tot]; atomic_spline_i = einspline::create(atomic_spline_i, 0.0, spline_radius, spline_npoints, 
splineData_i.data(), ((lm == 0) || (lm > 3))); mycenter.set_spline(atomic_spline_r, lm, iorb * 2); mycenter.set_spline(atomic_spline_i, lm, iorb * 2 + 1); einspline::destroy(atomic_spline_r); einspline::destroy(atomic_spline_i); } } } #ifdef PRINT_RADIAL char fname[64]; sprintf(fname, "band_%d_center_%d_pw.dat", iorb, center_idx); FILE* fout_pw = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_v.dat", iorb, center_idx); FILE* fout_spline_v = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_g.dat", iorb, center_idx); FILE* fout_spline_g = fopen(fname, "w"); sprintf(fname, "band_%d_center_%d_spline_l.dat", iorb, center_idx); FILE* fout_spline_l = fopen(fname, "w"); fprintf(fout_pw, "# r vals(lm)\n"); fprintf(fout_spline_v, "# r vals(lm)\n"); fprintf(fout_spline_g, "# r grads(lm)\n"); fprintf(fout_spline_l, "# r lapls(lm)\n"); // write to file for plotting for (int ip = 0; ip < spline_npoints - 1; ip++) { double r = delta * static_cast<double>(ip); mycenter.SplineInst->evaluate_vgl(r, mycenter.localV, mycenter.localG, mycenter.localL); fprintf(fout_pw, "%15.10lf ", r); fprintf(fout_spline_v, "%15.10lf ", r); fprintf(fout_spline_g, "%15.10lf ", r); fprintf(fout_spline_l, "%15.10lf ", r); for (int lm = 0; lm < lm_tot; lm++) { fprintf(fout_pw, "%15.10lf %15.10lf ", all_vals[center_idx][ip][lm].real(), all_vals[center_idx][ip][lm].imag()); fprintf(fout_spline_v, "%15.10lf %15.10lf ", mycenter.localV[lm * mycenter.Npad + iorb * 2], mycenter.localV[lm * mycenter.Npad + iorb * 2 + 1]); fprintf(fout_spline_g, "%15.10lf %15.10lf ", mycenter.localG[lm * mycenter.Npad + iorb * 2], mycenter.localG[lm * mycenter.Npad + iorb * 2 + 1]); fprintf(fout_spline_l, "%15.10lf %15.10lf ", mycenter.localL[lm * mycenter.Npad + iorb * 2], mycenter.localL[lm * mycenter.Npad + iorb * 2 + 1]); } fprintf(fout_pw, "\n"); fprintf(fout_spline_v, "\n"); fprintf(fout_spline_g, "\n"); fprintf(fout_spline_l, "\n"); } fclose(fout_pw); fclose(fout_spline_v); fclose(fout_spline_g); fclose(fout_spline_l); #endif } } }; } // namespace qmcplusplus #endif
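/* Standalone restatement (plain C, illustrative only) of the plane-wave
   evaluation in Gvectors::evaluate_psi_r above:
   psi(r) = sum_G cG * exp(i G.r), with G in Cartesian coordinates.  The
   class uses sincos(); portable cos()/sin() calls are used here instead. */
#include <math.h>
#include <complex.h>
#include <stddef.h>

double complex eval_psi_r(const double complex *cG, const double (*G)[3],
                          size_t nG, const double r[3])
{
  double complex val = 0.0;
  for (size_t ig = 0; ig < nG; ig++)
  {
    const double phase = G[ig][0]*r[0] + G[ig][1]*r[1] + G[ig][2]*r[2];
    val += cG[ig] * (cos(phase) + I*sin(phase)); /* one plane wave e^{iG.r} */
  }
  return val;
}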
maxcolnorm.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "../admm.h" #include <math.h> /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ /** * @brief The proximal update for column max-normalization. This routine * normalizes each factor column *if* the norm grows larger than 1. * * @param[out] primal The row-major matrix to update. * @param nrows The number of rows in primal. * @param ncols The number of columns in primal. * @param offset Not used. * @param data Not used. * @param rho Not used. * @param should_parallelize Should be true. */ void splatt_maxcolnorm_prox( val_t * primal, idx_t const nrows, idx_t const ncols, idx_t const offset, void * data, val_t const rho, bool const should_parallelize) { assert(should_parallelize); #pragma omp parallel for schedule(dynamic, 1) for(idx_t j=0; j < ncols; ++j) { double norm = 0.; for(idx_t i=0; i < nrows; ++i) { idx_t const index = j + (i*ncols); norm += primal[index] * primal[index]; } norm = sqrt(norm); if(norm <= 1.) { continue; } for(idx_t i=0; i < nrows; ++i) { idx_t const index = j + (i*ncols); primal[index] /= norm; } } } /****************************************************************************** * API FUNCTIONS *****************************************************************************/ splatt_error_type splatt_register_maxcolnorm( splatt_cpd_opts * opts, splatt_idx_t const * const modes_included, splatt_idx_t const num_modes) { for(idx_t m = 0; m < num_modes; ++m) { idx_t const mode = modes_included[m]; splatt_cpd_constraint * con = splatt_alloc_constraint(SPLATT_CON_ADMM); /* only fill the details that are used */ con->prox_func = splatt_maxcolnorm_prox; /* set hints to assist optimizations */ con->hints.row_separable = false; con->hints.sparsity_inducing = false; sprintf(con->description, "MAX-COL-NORM"); /* add to the CPD factorization */ splatt_register_constraint(opts, mode, con); } return SPLATT_SUCCESS; }
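/* Tiny standalone driver (not part of SPLATT) showing the effect of the
   proximal operator above on a 2x2 row-major matrix: column 0 has norm 5 and
   is rescaled to unit norm, column 1 (norm 0.5 <= 1) is left untouched.
   Plain doubles stand in for SPLATT's val_t/idx_t typedefs. */
#include <stdio.h>
#include <math.h>

int main(void)
{
  /* columns are (3, 4) and (0.3, 0.4) */
  double primal[4] = { 3.0, 0.3,
                       4.0, 0.4 };
  for(int j=0; j < 2; ++j) {
    double norm = 0.;
    for(int i=0; i < 2; ++i) {
      norm += primal[j + (i*2)] * primal[j + (i*2)];
    }
    norm = sqrt(norm);
    if(norm <= 1.) {
      continue;
    }
    for(int i=0; i < 2; ++i) {
      primal[j + (i*2)] /= norm;
    }
  }
  /* prints: col0 = (0.60, 0.80)  col1 = (0.30, 0.40) */
  printf("col0 = (%.2f, %.2f)  col1 = (%.2f, %.2f)\n",
         primal[0], primal[2], primal[1], primal[3]);
  return 0;
}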
graph_io.h
// Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <cxh@illinois.edu> and Pingfan Li <lipingfan@163.com> #include <vector> #include <set> #include <iostream> #include <fstream> #include <sstream> #include <string.h> #include <algorithm> //#include <iomanip> typedef float WeightT; struct Edge { int dst; WeightT wt; }; bool compare_id(Edge a, Edge b) { return (a.dst < b.dst); } void fill_data(int m, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, vector<vector<Edge> > vertices, bool symmetrize, bool sorted, bool remove_selfloops, bool remove_redundents) { //sort the neighbor list if(sorted) { printf("Sorting the neighbor lists..."); for(int i = 0; i < m; i++) { std::sort(vertices[i].begin(), vertices[i].end(), compare_id); } printf(" Done\n"); } //remove self loops int num_selfloops = 0; if(remove_selfloops) { printf("Removing self loops..."); for(int i = 0; i < m; i++) { for(unsigned j = 0; j < vertices[i].size(); j ++) { if(i == vertices[i][j].dst) { vertices[i].erase(vertices[i].begin()+j); num_selfloops ++; j --; } } } printf(" %d selfloops are removed\n", num_selfloops); } // remove redundent int num_redundents = 0; if(remove_redundents) { printf("Removing redundent edges..."); for (int i = 0; i < m; i++) { for (unsigned j = 1; j < vertices[i].size(); j ++) { if (vertices[i][j].dst == vertices[i][j-1].dst) { vertices[i].erase(vertices[i].begin()+j); num_redundents ++; j --; } } } printf(" %d redundent edges are removed\n", num_redundents); } /* // print some neighbor lists for (int i = 0; i < 3; i++) { cout << "src " << i << ": "; for (int j = 0; j < vertices[i].size(); j ++) cout << vertices[i][j].dst << " "; cout << endl; } */ row_offsets = (int *)malloc((m + 1) * sizeof(int)); int count = 0; for (int i = 0; i < m; i++) { row_offsets[i] = count; count += vertices[i].size(); } row_offsets[m] = count; if (symmetrize) { if(count != nnz) { nnz = count; } } else { if (count + num_selfloops + num_redundents != nnz) printf("Error reading graph, number of edges in edge list %d != %d\n", count, nnz); nnz = count; } printf("num_vertices %d num_edges %d\n", m, nnz); /* double avgdeg; double variance = 0.0; int maxdeg = 0; int mindeg = m; avgdeg = (double)nnz / m; for (int i = 0; i < m; i++) { int deg_i = row_offsets[i + 1] - row_offsets[i]; if (deg_i > maxdeg) maxdeg = deg_i; if (deg_i < mindeg) mindeg = deg_i; variance += (deg_i - avgdeg) * (deg_i - avgdeg) / m; } printf("min_degree %d max_degree %d avg_degree %.2f variance %.2f\n", mindeg, maxdeg, avgdeg, variance); */ column_indices = (int *)malloc(count * sizeof(int)); weight = (WeightT *)malloc(count * sizeof(WeightT)); vector<Edge>::iterator neighbor_list; for (int i = 0, index = 0; i < m; i++) { neighbor_list = vertices[i].begin(); while (neighbor_list != vertices[i].end()) { column_indices[index] = (*neighbor_list).dst; weight[index] = (*neighbor_list).wt; index ++; neighbor_list ++; } } /* // print some neighbor lists for (int i = 0; i < 6; i++) { int row_begin = row_offsets[i]; int row_end = row_offsets[i + 1]; cout << "src " << i << ": "; for (int j = row_begin; j < row_end; j ++) cout << column_indices[j] << " "; cout << endl; } //*/ //for (int i = 0; i < 10; i++) cout << weight[i] << ", "; //cout << endl; } // transfer R-MAT generated gr graph to CSR format void gr2csr(char *gr, int &m, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) { printf("Reading RMAT (.gr) input 
file %s\n", gr); std::ifstream cfile; cfile.open(gr); std::string str; getline(cfile, str); char c; sscanf(str.c_str(), "%c", &c); while (c == 'c') { getline(cfile, str); sscanf(str.c_str(), "%c", &c); } char sp[3]; sscanf(str.c_str(), "%c %s %d %d", &c, sp, &m, &nnz); vector<vector<Edge> > vertices; vector<Edge> neighbors; for (int i = 0; i < m; i++) vertices.push_back(neighbors); int src, dst; for (int i = 0; i < nnz; i++) { getline(cfile, str); sscanf(str.c_str(), "%c %d %d", &c, &src, &dst); if (c != 'a') printf("line %d\n", __LINE__); src--; dst--; Edge e1, e2; if(symmetrize) { e2.dst = src; e2.wt = 1; vertices[dst].push_back(e2); transpose = false; } if(!transpose) { e1.dst = dst; e1.wt = 1; vertices[src].push_back(e1); } else { e1.dst = src; e1.wt = 1; vertices[dst].push_back(e1); } } fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents); } // transfer *.graph file to CSR format void graph2csr(char *graph, int &m, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) { printf("Reading .graph input file %s\n", graph); std::ifstream cfile; cfile.open(graph); std::string str; getline(cfile, str); sscanf(str.c_str(), "%d %d", &m, &nnz); vector<vector<Edge> > vertices; vector<Edge> neighbors; for (int i = 0; i < m; i++) vertices.push_back(neighbors); int dst; for (int src = 0; src < m; src ++) { getline(cfile, str); istringstream istr; istr.str(str); while(istr>>dst) { dst --; Edge e1, e2; if(symmetrize) { e2.dst = src; e2.wt = 1; vertices[dst].push_back(e2); transpose = false; } if(!transpose) { e1.dst = dst; e1.wt = 1; vertices[src].push_back(e1); } else { e1.dst = src; e1.wt = 1; vertices[dst].push_back(e1); } } istr.clear(); } cfile.close(); fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents); } // transfer mtx graph to CSR format void mtx2csr(char *mtx, int &m, int &n, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) { printf("Reading (.mtx) input file %s\n", mtx); std::ifstream cfile; cfile.open(mtx); std::string str; getline(cfile, str); char c; sscanf(str.c_str(), "%c", &c); while (c == '%') { getline(cfile, str); sscanf(str.c_str(), "%c", &c); } sscanf(str.c_str(), "%d %d %d", &m, &n, &nnz); if (m != n) { printf("Warning, m(%d) != n(%d)\n", m, n); } vector<vector<Edge> > vertices; vector<Edge> neighbors; for (int i = 0; i < m; i ++) vertices.push_back(neighbors); int dst, src; WeightT wt = 1.0f; for (int i = 0; i < nnz; i ++) { getline(cfile, str); int num = sscanf(str.c_str(), "%d %d %f", &src, &dst, &wt); if (num == 2) wt = 1; if (wt < 0) wt = -wt; // non-negtive weight src--; dst--; Edge e1, e2; if(symmetrize && src != dst) { e2.dst = src; e2.wt = wt; vertices[dst].push_back(e2); transpose = false; } if(!transpose) { e1.dst = dst; e1.wt = wt; vertices[src].push_back(e1); } else { e1.dst = src; e1.wt = wt; vertices[dst].push_back(e1); } } cfile.close(); fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents); } /* void sort_neighbors(int m, int *row_offsets, int *&column_indices) { vector<int> neighbors; #pragma omp parallel for for(int i = 0; i < m; i++) { int row_begin = row_offsets[i]; int row_end = row_offsets[i + 1]; for (int offset = row_begin; offset < 
row_end; ++ offset) { neighbors.push_back(column_indices[offset]); } std::sort(neighbors.begin(), neighbors.end()); int k = 0; for (int offset = row_begin; offset < row_end; ++ offset) { column_indices[offset] = neighbors[k++]; } } } */ void read_graph(int argc, char *argv[], int &m, int &n, int &nnz, int *&row_offsets, int *&column_indices, int *&degree, WeightT *&weight, bool is_symmetrize=false, bool is_transpose=false, bool sorted=true, bool remove_selfloops=true, bool remove_redundents=true) { //if(is_symmetrize) printf("Requiring symmetric graphs for this algorithm\n"); if (strstr(argv[1], ".mtx")) mtx2csr(argv[1], m, n, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents); else if (strstr(argv[1], ".graph")) graph2csr(argv[1], m, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents); else if (strstr(argv[1], ".gr")) gr2csr(argv[1], m, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents); else { printf("Unrecognizable input file format\n"); exit(0); } printf("Calculating degree..."); degree = (int *)malloc(m * sizeof(int)); for (int i = 0; i < m; i++) { degree[i] = row_offsets[i + 1] - row_offsets[i]; } printf(" Done\n"); }
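/* Minimal standalone illustration (hypothetical data) of the CSR layout that
   fill_data() above produces: the sorted neighbors of vertex i occupy
   column_indices[row_offsets[i] .. row_offsets[i+1]-1]. */
#include <stdio.h>

int main(void)
{
  /* symmetrized triangle 0-1-2 plus edge 1-3 */
  int m = 4;
  int row_offsets[]    = { 0, 2, 5, 7, 8 };
  int column_indices[] = { 1, 2,   0, 2, 3,   0, 1,   1 };
  for (int i = 0; i < m; i++) {
    printf("src %d:", i);
    for (int off = row_offsets[i]; off < row_offsets[i+1]; off++)
      printf(" %d", column_indices[off]);
    printf("\n");
  }
  return 0;
}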
pr66199-6.c
/* PR middle-end/66199 */
/* { dg-do run } */
/* { dg-options "-O2 -fopenmp" } */

#pragma omp declare target
int u[1024], v[1024], w[1024];
#pragma omp end declare target

__attribute__((noinline, noclone)) long
f2 (long a, long b, long c)
{
  long d, e;
  #pragma omp target map(from: d, e)
  #pragma omp teams distribute parallel for default(none) firstprivate (a, b, c) shared(u, v, w) lastprivate(d, e)
  for (d = a; d < b; d++)
    {
      u[d] = v[d] + w[d];
      e = c + d * 5;
    }
  return d + e;
}

__attribute__((noinline, noclone)) long
f3 (long a1, long b1, long a2, long b2)
{
  long d1, d2;
  #pragma omp target map(from: d1, d2)
  #pragma omp teams distribute parallel for default(none) firstprivate (a1, b1, a2, b2) shared(u, v, w) lastprivate(d1, d2) collapse(2)
  for (d1 = a1; d1 < b1; d1++)
    for (d2 = a2; d2 < b2; d2++)
      u[d1 * 32 + d2] = v[d1 * 32 + d2] + w[d1 * 32 + d2];
  return d1 + d2;
}

int
main ()
{
  if (f2 (0, 1024, 17) != 1024 + (17 + 5 * 1023)
      || f3 (0, 32, 0, 32) != 64)
    __builtin_abort ();
  return 0;
}
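The test hinges on the OpenMP lastprivate rule: after a work-shared loop, listed variables take the values from the sequentially last iteration, so f2 must return b + (c + 5 * (b - 1)) no matter how iterations were distributed across teams and threads. A minimal host-only sketch of the same rule (a hypothetical standalone example, no offloading assumed; compile with -fopenmp):

#include <cstdio>

int main() {
	int i, last = -1;
	// lastprivate: after the region, 'i' and 'last' hold the values from the
	// sequentially final iteration (i == 9), regardless of which thread ran it;
	// the loop variable itself ends up with its post-loop value, 10.
	#pragma omp parallel for lastprivate(i, last)
	for (i = 0; i < 10; i++)
		last = i * 5;
	printf("i=%d last=%d\n", i, last); // prints i=10 last=45
	return 0;
}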
StochasticMaxPooling.h
// --------------------------------------------------------------------------
//  Binary Brain -- binary neural net framework
//
//  Copyright (C) 2018 by Ryuji Fuchikami
//  https://github.com/ryuz
//  ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------

#pragma once

#include <vector>
#include <random>

#include "bb/MaxPooling.h"

namespace bb {

// stochastic variant of the MaxPooling class
template <typename FT = float, typename BT = float>
class StochasticMaxPooling : public MaxPooling<FT, BT>
{
    using _super = MaxPooling<FT, BT>;

public:
    static inline std::string ModelName(void) { return "StochasticMaxPooling"; }
    static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<FT>::Name() + "_" + DataType<BT>::Name(); }

    std::string GetModelName(void)  const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }

protected:
    bool        m_host_only = false;

    index_t     m_filter_h_size;
    index_t     m_filter_w_size;

    index_t     m_input_w_size;
    index_t     m_input_h_size;
    index_t     m_input_c_size;
    index_t     m_output_w_size;
    index_t     m_output_h_size;
    index_t     m_output_c_size;

    indices_t   m_input_shape;
    indices_t   m_output_shape;

    FrameBuffer m_x_buf;

protected:
    StochasticMaxPooling(index_t filter_h_size, index_t filter_w_size)
    {
        m_filter_h_size = filter_h_size;
        m_filter_w_size = filter_w_size;
    }

    /**
     * @brief  command processing
     * @detail handle a runtime command
     * @param  args the command
     */
    void CommandProc(std::vector<std::string> args)
    {
        // set host-only mode
        if (args.size() == 2 && args[0] == "host_only") {
            m_host_only = EvalBool(args[1]);
        }
    }

public:
    ~StochasticMaxPooling() {}

    static std::shared_ptr<StochasticMaxPooling> Create(index_t filter_h_size=1, index_t filter_w_size=1)
    {
        auto self = std::shared_ptr<StochasticMaxPooling>(new StochasticMaxPooling(filter_h_size, filter_w_size));
        return self;
    }

    index_t GetFilterHeight(void) const override { return m_filter_h_size; }
    index_t GetFilterWidth(void)  const override { return m_filter_w_size; }

    /**
     * @brief  set the input shape
     * @detail Sets the input shape and initializes the internal state, so that
     *         GetOutputShape() can return a value from then on. The internal
     *         state is re-initialized even if the same shape is specified again.
     * @param  shape the shape of the nodes that make up one frame
     * @return the output shape
     */
    indices_t SetInputShape(indices_t shape)
    {
        // do nothing if this shape is already set
        if ( shape == this->GetInputShape() ) {
            return this->GetOutputShape();
        }

        BB_ASSERT(shape.size() == 3);
        m_input_c_size  = shape[0];
        m_input_h_size  = shape[1];
        m_input_w_size  = shape[2];
        m_output_w_size = (m_input_w_size + m_filter_w_size - 1) / m_filter_w_size;
        m_output_h_size = (m_input_h_size + m_filter_h_size - 1) / m_filter_h_size;
        m_output_c_size = m_input_c_size;

        m_input_shape  = shape;
        m_output_shape = indices_t({m_output_c_size, m_output_h_size, m_output_w_size});

        return m_output_shape;
    }

    /**
     * @brief  get the input shape
     * @return the input shape
     */
    indices_t GetInputShape(void) const { return m_input_shape; }

    /**
     * @brief  get the output shape
     * @return the output shape
     */
    indices_t GetOutputShape(void) const { return m_output_shape; }

protected:
    /*
    inline void* GetInputPtr(NeuralNetBuffer<T>& buf, int c, int y, int x)  { return buf.Lock((c*m_input_h_size  + y)*m_input_w_size  + x); }
    inline void* GetOutputPtr(NeuralNetBuffer<T>& buf, int c, int y, int x) { return buf.Lock((c*m_output_h_size + y)*m_output_w_size + x); }
    */

    inline index_t GetInputNode(index_t c, index_t y, index_t x)  { return (c * m_input_h_size  + y) * m_input_w_size  + x; }
    inline index_t GetOutputNode(index_t c, index_t y, index_t x) { return (c * m_output_h_size + y) * m_output_w_size + x; }

public:
    FrameBuffer Forward(FrameBuffer x_buf, bool train = true)
    {
        BB_ASSERT(x_buf.GetType() == DataType<FT>::type);

        // set the input shape on first use if SetInputShape has not been called
        if (x_buf.GetShape() != m_input_shape) {
            SetInputShape(x_buf.GetShape());
        }

        // keep the input for the backward pass
        if ( train ) {
            m_x_buf = x_buf;
        }

        // set up the output buffer
        FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<FT>::type);

#ifdef BB_WITH_CUDA
        // CUDA version
        if ( DataType<FT>::type == BB_TYPE_FP32 && !m_host_only
                && m_filter_h_size == 2 && m_filter_w_size == 2
                && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            auto ptr_x = x_buf.LockDeviceMemoryConst();
            auto ptr_y = y_buf.LockDeviceMemory(true);
            bbcu_fp32_StochasticMaxPooling2x2_Forward
                (
                    (float const *)ptr_x.GetAddr(),
                    (float*       )ptr_y.GetAddr(),
                    (int          )m_input_w_size,
                    (int          )m_input_h_size,
                    (int          )m_output_w_size,
                    (int          )m_output_h_size,
                    (int          )m_output_c_size,
                    (int          )y_buf.GetFrameSize(),
                    (int          )(y_buf.GetFrameStride() / sizeof(float))
                );
            return y_buf;
        }
#endif

        // generic (host) implementation
        {
            auto x_ptr = x_buf.LockConst<FT>();
            auto y_ptr = y_buf.Lock<FT>(true);
            auto frame_size = x_buf.GetFrameSize();

            #pragma omp parallel for
            for (index_t c = 0; c < m_input_c_size; ++c) {
                for (index_t y = 0; y < m_output_h_size; ++y) {
                    for (index_t x = 0; x < m_output_w_size; ++x) {
                        for (index_t frame = 0; frame < frame_size; ++frame) {
                            // stochastic OR: invert the inputs, take the AND (product), then invert the output
                            BT out_sig = (BT)1.0;
                            for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
                                index_t iy = y*m_filter_h_size + fy;
                                if ( iy < m_input_h_size ) {
                                    for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
                                        index_t ix = x*m_filter_w_size + fx;
                                        if ( ix < m_input_w_size ) {
                                            FT in_sig = x_ptr.Get(frame, {c, iy, ix});
                                            out_sig *= ((BT)1.0 - in_sig);
                                        }
                                    }
                                }
                            }
                            y_ptr.Set(frame, {c, y, x}, (FT)((BT)1.0 - out_sig));
                        }
                    }
                }
            }

            return y_buf;
        }
    }

    FrameBuffer Backward(FrameBuffer dy_buf)
    {
        if (dy_buf.Empty()) {
            return FrameBuffer();
        }

        BB_ASSERT(dy_buf.GetType() == DataType<BT>::type);
        BB_ASSERT(dy_buf.GetShape().size() == 3);

        FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<BT>::type);

        FrameBuffer x_buf = m_x_buf;
        m_x_buf = FrameBuffer();

#ifdef BB_WITH_CUDA
        if ( DataType<BT>::type == BB_TYPE_FP32 && DataType<FT>::type == BB_TYPE_FP32 && !m_host_only
                && m_filter_h_size == 2 && m_filter_w_size == 2
                && x_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // CUDA version
            auto ptr_x  = x_buf.LockDeviceMemoryConst();
            auto ptr_dy = dy_buf.LockDeviceMemoryConst();
            auto ptr_dx = dx_buf.LockDeviceMemory(true);
            bbcu_fp32_StochasticMaxPooling2x2_Backward
                (
                    (float const *)ptr_x.GetAddr(),
                    (float const *)ptr_dy.GetAddr(),
                    (float       *)ptr_dx.GetAddr(),
                    (int          )m_input_w_size,
                    (int          )m_input_h_size,
                    (int          )m_output_w_size,
                    (int          )m_output_h_size,
                    (int          )m_output_c_size,
                    (int          )dy_buf.GetFrameSize(),
                    (int          )(dy_buf.GetFrameStride() / sizeof(float))
                );
            return dx_buf;
        }
#endif

        // generic (host) implementation, 2x2 filters only
        if ( m_filter_h_size == 2 && m_filter_w_size == 2 ) {
            auto x_ptr  = x_buf.LockConst<FT>();
            auto dy_ptr = dy_buf.LockConst<BT>();
            auto dx_ptr = dx_buf.Lock<BT>(true);
            auto frame_size = x_buf.GetFrameSize();

            #pragma omp parallel for
            for (index_t c = 0; c < m_input_c_size; ++c) {
                for (index_t y = 0; y < m_output_h_size; ++y) {
                    for (index_t x = 0; x < m_output_w_size; ++x) {
                        for (index_t frame = 0; frame < frame_size; ++frame) {
                            BT in_sig[2][2] = {{0, 0}, {0, 0}};
                            for (index_t fy = 0; fy < 2; ++fy) {
                                index_t iy = y*2 + fy;
                                if ( iy < m_input_h_size ) {
                                    for (index_t fx = 0; fx < 2; ++fx) {
                                        index_t ix = x*2 + fx;
                                        if ( ix < m_input_w_size ) {
                                            in_sig[fy][fx] = (BT)x_ptr.Get(frame, c, iy, ix);
                                        }
                                    }
                                }
                            }

                            BT out_grad = dy_ptr.Get(frame, c, y, x);
                            // d(out)/d(x_k) is out_grad times the product of (1 - x_i) over the other window cells
                            BT in_grad[2][2];
                            in_grad[0][0] = (BT)(out_grad * (1.0 - in_sig[0][1]) * (1.0 - in_sig[1][0]) * (1.0 - in_sig[1][1]));
                            in_grad[0][1] = (BT)(out_grad * (1.0 - in_sig[0][0]) * (1.0 - in_sig[1][0]) * (1.0 - in_sig[1][1]));
                            in_grad[1][0] = (BT)(out_grad * (1.0 - in_sig[0][0]) * (1.0 - in_sig[0][1]) * (1.0 - in_sig[1][1]));
                            in_grad[1][1] = (BT)(out_grad * (1.0 - in_sig[0][0]) * (1.0 - in_sig[0][1]) * (1.0 - in_sig[1][0]));
                            for (index_t fy = 0; fy < 2; ++fy) {
                                index_t iy = y*2 + fy;
                                if ( iy < m_input_h_size ) {
                                    for (index_t fx = 0; fx < 2; ++fx) {
                                        index_t ix = x*2 + fx;
                                        if ( ix < m_input_w_size ) {
                                            dx_ptr.Set(frame, c, iy, ix, in_grad[fy][fx]);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }

            return dx_buf;
        }

        // generic implementation for other filter sizes (not implemented yet: the code below is still plain MaxPooling)
        BB_ASSERT(0);
#if 0
        {
            auto x_ptr  = x_buf.LockConst<FT>();
            auto y_ptr  = m_y_buf.LockConst<FT>();
            auto dy_ptr = dy_buf.LockConst<BT>();
            auto dx_ptr = dx_buf.Lock<BT>(true);
            auto frame_size = m_x_buf.GetFrameSize();

            #pragma omp parallel for
            for (index_t c = 0; c < m_input_c_size; ++c) {
                for (index_t y = 0; y < m_output_h_size; ++y) {
                    for (index_t x = 0; x < m_output_w_size; ++x) {
                        for (index_t frame = 0; frame < frame_size; ++frame) {
                            FT out_sig = y_ptr.Get(frame, c, y, x);
                            BT grad    = dy_ptr.Get(frame, c, y, x);
                            for (index_t fy = 0; fy < m_filter_h_size; ++fy) {
                                index_t iy = y*m_filter_h_size + fy;
                                if ( iy < m_input_h_size ) {
                                    for (index_t fx = 0; fx < m_filter_w_size; ++fx) {
                                        index_t ix = x*m_filter_w_size + fx;
                                        if ( ix < m_input_w_size ) {
                                            FT in_sig = x_ptr.Get(frame, c, iy, ix);
                                            dx_ptr.Set(frame, c, iy, ix, (in_sig == out_sig) ? grad : (BT)0);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }

            return dx_buf;
        }
#endif
    }

    // serialization
protected:
    void DumpObjectData(std::ostream &os) const override
    {
        // version
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // parent class
        _super::DumpObjectData(os);

        // members
        bb::SaveValue(os, m_host_only);
        bb::SaveValue(os, m_filter_h_size);
        bb::SaveValue(os, m_filter_w_size);
        bb::SaveValue(os, m_input_w_size);
        bb::SaveValue(os, m_input_h_size);
        bb::SaveValue(os, m_input_c_size);
        bb::SaveValue(os, m_output_w_size);
        bb::SaveValue(os, m_output_h_size);
        bb::SaveValue(os, m_output_c_size);
    }

    void LoadObjectData(std::istream &is) override
    {
        // version
        std::int64_t ver;
        bb::LoadValue(is, ver);

        BB_ASSERT(ver == 1);

        // parent class
        _super::LoadObjectData(is);

        // members (read back in the same order DumpObjectData wrote them;
        // the original code read the w/c fields in swapped order, corrupting
        // the shapes on reload)
        bb::LoadValue(is, m_host_only);
        bb::LoadValue(is, m_filter_h_size);
        bb::LoadValue(is, m_filter_w_size);
        bb::LoadValue(is, m_input_w_size);
        bb::LoadValue(is, m_input_h_size);
        bb::LoadValue(is, m_input_c_size);
        bb::LoadValue(is, m_output_w_size);
        bb::LoadValue(is, m_output_h_size);
        bb::LoadValue(is, m_output_c_size);

        // rebuild derived state
        m_input_shape  = indices_t({m_input_c_size, m_input_h_size, m_input_w_size});
        m_output_shape = indices_t({m_output_c_size, m_output_h_size, m_output_w_size});
    }
};

}
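For a 2x2 window the forward pass computes a probabilistic OR, y = 1 - (1 - x00)(1 - x01)(1 - x10)(1 - x11), and the backward pass uses its partial derivatives: dy/dx_k is the product of (1 - x_i) over the other three inputs, which is exactly what the in_grad[][] expressions above evaluate. A minimal standalone sketch of the same math (hypothetical helper names, independent of the BinaryBrain types):

#include <cstdio>

// Forward: probabilistic OR of a 2x2 window of values in [0, 1].
float stochastic_or_2x2(const float x[2][2]) {
    float prod = 1.0f;
    for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
            prod *= (1.0f - x[i][j]);
    return 1.0f - prod;
}

// Backward: d(out)/d(x[k][l]) = out_grad * product of (1 - x[i][j]) over the other cells.
void stochastic_or_2x2_grad(const float x[2][2], float out_grad, float dx[2][2]) {
    for (int k = 0; k < 2; ++k)
        for (int l = 0; l < 2; ++l) {
            float g = out_grad;
            for (int i = 0; i < 2; ++i)
                for (int j = 0; j < 2; ++j)
                    if (i != k || j != l)
                        g *= (1.0f - x[i][j]);
            dx[k][l] = g;
        }
}

int main() {
    const float x[2][2] = {{0.9f, 0.1f}, {0.2f, 0.3f}};
    float dx[2][2];
    stochastic_or_2x2_grad(x, 1.0f, dx);
    printf("y = %f\n", stochastic_or_2x2(x)); // 1 - 0.1*0.9*0.8*0.7 = 0.9496
    printf("dy/dx00 = %f\n", dx[0][0]);       // 0.9*0.8*0.7 = 0.504
    return 0;
}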
omp_quiesce_test_2.c
// input: number of threads
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <signal.h>
#include <omp.h>
// Uncomment this if your implementation has a separate header
//#include <omp_interop.h>
#include <sys/timeb.h>
#include <unistd.h>

/* Important: make sure you use the num_threads clause on the parallel directive and set it to the
 * number of hardware cores, not the number of cores Linux reports or the OpenMP default.
 *
 * cat /proc/cpuinfo and check the processor id, core id and CPU model number so you can look them up on the internet.
 * Lennon is a Xeon CPU E5-2683 v3 @ 2.00GHz; it has two CPUs for a total of 28 cores, supporting up to 56 threads.
 * Paul is a Xeon CPU E5-2695 v2 @ 2.40GHz; it has two CPUs for a total of 24 cores, supporting up to 48 threads.
 * Fornax is an Intel® Xeon® E5-2699 v3 @ 2.3GHz; it has two CPUs for a total of 36 cores, supporting up to 72 threads.
 *
 * Use -O0 optimization
 */
int main(int argc, char * argv[]) {
	int thr_num = 4;
	if (argc >= 2)
		thr_num = (atoi(argv[1]));

	#pragma omp parallel num_threads(thr_num)
	{
		int tid = omp_get_thread_num();
		(void) tid; // silence the unused-variable warning
		if (omp_quiesce() == 0) {
			printf("Error: omp_quiesce() returns SUCCESS when called inside a parallel region\n");
		} else {
			printf("Expected: omp_quiesce() returns FAILURE when called inside a parallel region\n");
		}
	}
	return 0;
}
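Note that omp_quiesce() is not part of the OpenMP standard; this test targets a proposed/experimental runtime extension (hence the commented-out omp_interop.h include), and the expectation here is that quiescing must fail while worker threads are still active inside a parallel region. A complementary sketch of the intended success path, assuming the same experimental API and return convention (0 on success):

#include <cstdio>
#include <omp.h>

int main() {
	#pragma omp parallel num_threads(4)
	{
		// ... parallel work ...
	}
	// Outside any parallel region the runtime should be able to quiesce
	// (shut down or park) its worker threads, so 0 (success) is expected.
	if (omp_quiesce() == 0)
		printf("Expected: omp_quiesce() succeeded outside a parallel region\n");
	else
		printf("Error: omp_quiesce() failed outside a parallel region\n");
	return 0;
}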
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* problem size and time steps; must be given on the command line */
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including an extra element to mark the end of the list
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 32;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.

       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public License as
       published by the Free Software Foundation; either version 2.1 of the
       License, or (at your option) any later version.

       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
       Lesser General Public License for more details.

       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */

    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation. It must not
       itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header. GCC knows the name of this
       header in order to preinclude it. */

    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex. If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */

    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */

    /* We do not support C11 <threads.h>. */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-15,16)),ceild(4*t2-Nz-28,32));t3<=min(min(min(floord(4*t2+Ny,32),floord(Nt+Ny-4,32)),floord(2*t1+Ny+1,32)),floord(4*t1-4*t2+Nz+Ny-1,32));t3++) {
            for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(32*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(32*t3+Nx+28,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),32*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),32*t3+30),64*t4+62),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(64*t4,t5+1);
                    ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free the allocated arrays (disabled: causes performance degradation)
  /*
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
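Buried under the time tiling, the computation itself is a simple Jacobi-style update over two ping-pong buffers; for readability, here is an untiled reference sweep of the same stencil (a sketch using the A/alpha/beta naming from above, not part of the benchmark):

// Reference (untiled) sweep of the order-1 3D 7-point stencil; semantically
// what the tiled CLooG nest computes: each interior point becomes a weighted
// sum of itself (alpha) and its six axis neighbors (beta).
static void stencil_7pt_reference(double ****A, int Nt, int Nz, int Ny, int Nx,
                                  double alpha, double beta)
{
  for (int t = 0; t < Nt - 1; t++)
    for (int i = 1; i < Nz - 1; i++)
      for (int j = 1; j < Ny - 1; j++)
        for (int k = 1; k < Nx - 1; k++)
          A[(t + 1) % 2][i][j][k] =
              alpha * A[t % 2][i][j][k]
            + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]
                    + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]
                    + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
}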
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. 
These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; mutable IdentifierInfo *Ident_abstract; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector" /// and "bool" fast comparison. Only present if AltiVec or ZVector are /// enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; IdentifierInfo *Ident_Bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> MSFenvAccess; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might have been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods.
/// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. 
void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief When we are consuming a code-completion token without having /// matched a specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules. bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static TypeResult getTypeAnnotation(const Token &Tok) { if (!Tok.getAnnotationValue()) return TypeError(); return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, TypeResult T) { assert((T.isInvalid() || T.get()) && "produced a valid-but-null type annotation?"); Tok.setAnnotationValue(T.isInvalid() ? 
nullptr : T.get().getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); bool MightBeCXXScopeToken() { return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) || (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) || Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super); } bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) { return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext); } private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && Tok.getIdentifierInfo() != Ident_Bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. 
bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser &p) : P(p), PrevPreferredType(P.PreferredType) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. 
/// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); /// Kinds of compound pseudo-tokens formed by a sequence of two real tokens. enum class CompoundToken { /// A '(' '{' beginning a statement-expression. StmtExprBegin, /// A '}' ')' ending a statement-expression. StmtExprEnd, /// A '[' '[' beginning a C++11 or C2x attribute. AttrBegin, /// A ']' ']' ending a C++11 or C2x attribute. AttrEnd, /// A '::' '*' forming a C++ pointer-to-member declaration. MemberPtr, }; /// Check that a compound operator was written in a "sensible" way, and warn /// if not. void checkCompoundToken(SourceLocation FirstTokLoc, tok::TokenKind FirstTokKind, CompoundToken Op); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// Introduces zero or more scopes for parsing. The scopes will all be exited /// when the object is destroyed. 
class MultiParseScope { Parser &Self; unsigned NumScopes = 0; MultiParseScope(const MultiParseScope&) = delete; public: MultiParseScope(Parser &Self) : Self(Self) {} void Enter(unsigned ScopeFlags) { Self.EnterScope(ScopeFlags); ++NumScopes; } void Exit() { while (NumScopes) { Self.ExitScope(); --NumScopes; } } ~MultiParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); /// Re-enter the template scopes for a declaration that might be a template. unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. Balances (), [], and {} delimiter tokens while /// skipping. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); /// The location of the first statement inside an else that might /// have a misleading indentation. If there is no /// MisleadingIndentationChecker active on an else, this location is invalid. SourceLocation MisleadingIndentationElseLoc; private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods.
  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
        : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes. Used by ParseGNUAttributes.
  class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon; // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
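  ///
  /// For example (hypothetical code, for illustration only):
  /// \code
  ///   struct S {
  ///     void f(int x = g()); // 'g()' is only lexed here; it is parsed
  ///     static int g();      // once S is completely defined.
  ///   };
  /// \endcode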
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
        : Param(P), Toks(std::move(Toks)) {}

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
        : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) {}

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top-level
  /// (non-nested) C++ class, its method declarations that contain parts that
  /// won't be parsed until after the definition is completed
  /// (C++ [class.mem]p2), together with any attached inline definitions, are
  /// stored here along with the tokens that will later be parsed to create
  /// those entities.
  typedef SmallVector<LateParsedDeclaration*, 2>
      LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
        : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
          TagOrTemplate(TagOrTemplate) {}

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// The stack of classes that is currently being parsed.
  /// Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
        : P(P), Popped(false),
          State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains any template-specific information that has been parsed
  /// prior to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
        : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() {}

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
        : Kind(isSpecialization ? ExplicitSpecialization : Template),
          TemplateParams(TemplateParams),
          LastParameterListWasEmpty(lastParameterListWasEmpty) {}

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
        : Kind(ExplicitInstantiation), TemplateParams(nullptr),
          ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
          LastParameterListWasEmpty(false) {}

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// Whether the last template parameter list was empty.
    bool LastParameterListWasEmpty;

    SourceRange getSourceRange() const LLVM_READONLY;
  };

  // In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
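  /// (For instance, given the base type 'NSArray', this handles the
  /// '<NSString *>' type-argument clause in 'NSArray<NSString *> *', or a
  /// protocol-qualifier clause such as '<NSCopying>'.)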
  TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                    ParsedType type,
                                                    bool consumeLastToken,
                                                    SourceLocation &endLoc);

  void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                  Decl *CDecl);
  DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                                ParsedAttributes &prefixAttrs);

  struct ObjCImplParsingDataRAII {
    Parser &P;
    Decl *Dcl;
    bool HasCFunction;
    typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
    LateParsedObjCMethodContainer LateParsedObjCMethods;

    ObjCImplParsingDataRAII(Parser &parser, Decl *D)
        : P(parser), Dcl(D), HasCFunction(false) {
      P.CurParsedObjCImpl = this;
      Finished = false;
    }
    ~ObjCImplParsingDataRAII();

    void finish(SourceRange AtEnd);
    bool isFinished() const { return Finished; }

  private:
    bool Finished;
  };
  ObjCImplParsingDataRAII *CurParsedObjCImpl;
  void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

  DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                      ParsedAttributes &Attrs);
  DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
  Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
  Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
  Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

  IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

  // Definitions for Objective-C context-sensitive keyword recognition.
  enum ObjCTypeQual {
    objc_in = 0,
    objc_out,
    objc_inout,
    objc_oneway,
    objc_bycopy,
    objc_byref,
    objc_nonnull,
    objc_nullable,
    objc_null_unspecified,
    objc_NumQuals
  };
  IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

  bool isTokIdentifier_in() const;

  ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                               ParsedAttributes *ParamAttrs);
  Decl *ParseObjCMethodPrototype(
      tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
      bool MethodDefinition = true);
  Decl *ParseObjCMethodDecl(
      SourceLocation mLoc, tok::TokenKind mType,
      tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
      bool MethodDefinition = true);
  void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

  Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - Indicates whether an expression is or may be a type cast.
  enum TypeCastState {
    NotTypeCast = 0,
    MaybeTypeCast,
    IsTypeCast
  };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpressionInExprEvalContext(
      TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseCaseExpression(SourceLocation CaseLoc);
  ExprResult ParseConstraintExpression();
  ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
  ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
  // Expr that doesn't include commas.
  ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                  unsigned &NumLineToksConsumed,
                                  bool IsUnevaluated);
  ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

private:
  ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
  ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
  ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec);

  /// Control what ParseCastExpression will parse.
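  /// (For example, the operand of sizeof must grammatically be a
  /// unary-expression, so a caller parsing that operand would pass
  /// UnaryExprOnly; this is a statement of intent, not an exhaustive list of
  /// call sites.)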
enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseSYCLUniqueStableNameExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. 
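  /// (Illustration: '__builtin_bit_cast(float, 0x3f800000u)' reinterprets the
  /// bits of a 32-bit unsigned integer as a float; the source and destination
  /// types must have the same size.)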
ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr, bool EnterForConditionScope = false); DeclGroupPtrTy ParseAliasDeclarationInInitStatement(DeclaratorContext Context, ParsedAttributesWithRange &Attrs); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C++ Concepts ExprResult ParseRequiresExpression(); void ParseTrailingRequiresClause(Declarator &D); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. 
  /// ParseInitializer
  ///       initializer: [C99 6.7.8]
  ///         assignment-expression
  ///         '{' ...
  ExprResult ParseInitializer() {
    if (Tok.isNot(tok::l_brace))
      return ParseAssignmentExpression();
    return ParseBraceInitializer();
  }
  bool MayBeDesignationStart();
  ExprResult ParseBraceInitializer();
  struct DesignatorCompletionInfo {
    SmallVectorImpl<Expr *> &InitExprs;
    QualType PreferredBaseType;
  };
  ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);

  //===--------------------------------------------------------------------===//
  // clang Expressions

  ExprResult ParseBlockLiteralExpression(); // ^{...}

  //===--------------------------------------------------------------------===//
  // Objective-C Expressions

  ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
  ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
  ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
  ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
  ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
  bool isSimpleObjCMessageExpression();
  ExprResult ParseObjCMessageExpression();
  ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                            SourceLocation SuperLoc,
                                            ParsedType ReceiverType,
                                            Expr *ReceiverExpr);
  ExprResult ParseAssignmentExprWithObjCMessageExprStart(
      SourceLocation LBracloc, SourceLocation SuperLoc,
      ParsedType ReceiverType, Expr *ReceiverExpr);
  bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

  //===--------------------------------------------------------------------===//
  // C99 6.8: Statements and Blocks.

  /// A SmallVector of statements, with stack size 32 (as that is the only one
  /// used).
  typedef SmallVector<Stmt*, 32> StmtVector;
  /// A SmallVector of expressions, with stack size 12 (the maximum used).
  typedef SmallVector<Expr*, 12> ExprVector;
  /// A SmallVector of types.
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK,
                                 SourceLocation *LParenLoc = nullptr,
                                 SourceLocation *RParenLoc = nullptr);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// Describes how an __if_exists block should be handled.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// The behavior this __if_exists or __if_not_exists block
    /// should have.
    IfExistsBehavior Behavior;
  };

  bool ParseMicrosoftIfExistsCondition(IfExistsCondition &Result);
  void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
  void ParseMicrosoftIfExistsExternalDeclaration();
  void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                              ParsedAttributes &AccessAttrs,
                                              AccessSpecifier &CurAS);
  bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                              bool &InitExprsOk);
  bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                           SmallVectorImpl<Expr *> &Constraints,
                           SmallVectorImpl<Expr *> &Exprs);

  //===--------------------------------------------------------------------===//
  // C++ 6: Statements and Blocks

  StmtResult ParseCXXTryBlock();
  StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
  StmtResult ParseCXXCatchBlock(bool FnCatch = false);

  //===--------------------------------------------------------------------===//
  // MS: SEH Statements and Blocks

  StmtResult ParseSEHTryBlock();
  StmtResult ParseSEHExceptBlock(SourceLocation Loc);
  StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
  StmtResult ParseSEHLeaveStatement();

  //===--------------------------------------------------------------------===//
  // Objective-C Statements

  StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                  ParsedStmtContext StmtCtx);
  StmtResult ParseObjCTryStmt(SourceLocation atLoc);
  StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
  StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
  StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

  //===--------------------------------------------------------------------===//
  // C99 6.7: Declarations.

  /// A context for parsing declaration specifiers. TODO: flesh this
  /// out; there are other significant restrictions on specifiers that
  /// would be best implemented in the parser.
  enum class DeclSpecContext {
    DSC_normal,         // normal context
    DSC_class,          // class context, enables 'friend'
    DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
    DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
    DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
    DSC_top_level,         // top-level/namespace declaration context
    DSC_template_param,    // template parameter context
    DSC_template_type_arg, // template type argument context
    DSC_objc_method_result, // ObjC method result context, enables
                            // 'instancetype'
    DSC_condition // condition declaration context
  };

  /// Is this a context in which we are parsing just a type-specifier (or
  /// trailing-type-specifier)?
  static bool isTypeSpecifier(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
      return false;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return true;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Whether a defining-type-specifier is permitted in a given context.
  enum class AllowDefiningTypeSpec {
    /// The grammar doesn't allow a defining-type-specifier here, and we must
    /// not parse one (e.g., because a '{' could mean something else).
    No,
    /// The grammar doesn't allow a defining-type-specifier here, but we permit
    /// one for error recovery purposes. Sema will reject.
NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. 
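  /// For example, after tentatively parsing 'for (int x : xs)', the ':' and
  /// 'xs' are recorded below as ColonLoc and RangeExpr respectively.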
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributesWithRange &attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            RecordDecl *TagDecl);
  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier. Return false if it isn't part of a
  /// type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier, and that it isn't part of an expression such as a
  /// function-style cast. Return false if it's not a decl-specifier, or we're
  /// not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration and an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return Tok.is(tok::kw_using) ||
             isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks whether the current tokens form a type-id or an expression.
  /// It is similar to isTypeIdInParens but does not assume that the type-id
  /// is in parentheses.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as an expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for function declarator and false for constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
  /// might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

  struct ConditionDeclarationOrInitStatementState;
  enum class ConditionOrInitStatement {
    Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Determine whether we could have an enum-base. /// /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise /// only consider this to be an enum-base if the next token is a '{'. /// /// \return \c false if this cannot possibly be an enum base; \c true /// otherwise. bool isEnumBase(bool AllowSemi); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. 
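  //
  // A typical call site (a sketch, not a quote from the parser) pairs them
  // with a TentativeParsingAction so the consumed tokens can be rewound:
  //
  //   TentativeParsingAction TPA(*this);
  //   TPResult TPR = TryParseDeclarator(/*mayBeAbstract=*/false);
  //   TPA.Revert();                 // back up to where tentative parsing began
  //   if (TPR == TPResult::True) {
  //     // commit to parsing this as a declaration
  //   }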
  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult TryParseParameterDeclarationClause(
      bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

  /// Try to skip a possibly empty sequence of 'attribute-specifier's without
  /// full validation of the syntactic structure of attributes.
  bool TrySkipAttributes();

public:
  TypeResult
  ParseTypeName(SourceRange *Range = nullptr,
                DeclaratorContext Context = DeclaratorContext::TypeName,
                AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
                ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear in certain syntactic locations
  // which the standard permits but we do not yet support; for example,
  // attributes that appertain to decl specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID,
                               bool DiagnoseEmptyAttrs = false);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// Emit warnings for C++11 and C2x attributes that are in a position that
  /// clang accepts as an extension.
  void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
  unsigned
  ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax);

  enum ParseAttrKindMask {
    PAKM_GNU = 1 << 0,
    PAKM_Declspec = 1 << 1,
    PAKM_CXX11 = 1 << 2,
  };

  /// \brief Parse attributes based on what syntaxes are desired, allowing for
  /// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
  /// __attribute__((...)) __declspec(...) __attribute__((...))
  /// Note that Microsoft attributes (spelled with single square brackets) are
  /// not supported by this because of parsing ambiguities with other
  /// constructs.
  ///
  /// There are, however, attribute orderings that should not be accepted when
  /// parsing in arbitrary order, e.g.,
  ///
  ///   [[]] __attribute__(()) int i; // OK
  ///   __attribute__(()) [[]] int i; // Not OK
  ///
  /// Such situations should use the specific attribute parsing functionality.
  void ParseAttributes(unsigned WhichAttrKinds,
                       ParsedAttributesWithRange &Attrs,
                       SourceLocation *End = nullptr,
                       LateParsedAttrList *LateAttrs = nullptr);
  void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                       SourceLocation *End = nullptr,
                       LateParsedAttrList *LateAttrs = nullptr) {
    ParsedAttributesWithRange AttrsWithRange(AttrFactory);
    ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
    Attrs.takeAllFrom(AttrsWithRange);
  }
  /// \brief Possibly parse attributes based on what syntaxes are desired,
  /// allowing for the order to vary.
  bool MaybeParseAttributes(unsigned WhichAttrKinds,
                            ParsedAttributesWithRange &Attrs,
                            SourceLocation *End = nullptr,
                            LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
        (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
      ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
      return true;
    }
    return false;
  }
  bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                            SourceLocation *End = nullptr,
                            LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
        (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
      ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
      return true;
    }
    return false;
  }

  void MaybeParseGNUAttributes(Declarator &D,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParsedAttributes attrs(AttrFactory);
      SourceLocation endLoc;
      ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
      D.takeAttributes(attrs, endLoc);
    }
  }

  /// Parses GNU-style attributes and returns them without source range
  /// information.
  ///
  /// This API is discouraged. Use the version that takes a
  /// ParsedAttributesWithRange instead.
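  ///
  /// (For example, a GNU-style attribute is the '__attribute__((aligned(16)))'
  /// spelling, as opposed to the C++11 '[[...]]' or Microsoft '__declspec'
  /// forms.)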
  bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                               SourceLocation *EndLoc = nullptr,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParsedAttributesWithRange AttrsWithRange(AttrFactory);
      // Parse into the range-tracking list, then hand the attributes over
      // without their source range, as documented above.
      ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs);
      Attrs.takeAllFrom(AttrsWithRange);
      return true;
    }
    return false;
  }
  bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                               SourceLocation *EndLoc = nullptr,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
      return true;
    }
    return false;
  }

  /// Parses GNU-style attributes and returns them without source range
  /// information.
  ///
  /// This API is discouraged. Use the version that takes a
  /// ParsedAttributesWithRange instead.
  void ParseGNUAttributes(ParsedAttributes &Attrs,
                          SourceLocation *EndLoc = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr,
                          Declarator *D = nullptr) {
    ParsedAttributesWithRange AttrsWithRange(AttrFactory);
    ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D);
    Attrs.takeAllFrom(AttrsWithRange);
  }
  void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation *EndLoc = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr,
                          Declarator *D = nullptr);
  void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                             ParsedAttr::Syntax Syntax, Declarator *D);
  IdentifierLoc *ParseIdentifierLoc();
  unsigned
  ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                          ParsedAttributes &Attrs, SourceLocation *EndLoc,
                          IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                          ParsedAttr::Syntax Syntax);

  void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
    // If parsing the attributes found an OpenMP directive, emit those tokens
    // to the parse stream now.
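    // The current token is pushed back first; once the cached OpenMP tokens
    // below have been consumed, lexing resumes with it at the original
    // position.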
if (!OpenMPTokens.empty()) { PP.EnterToken(Tok, /*IsReinject*/ true); PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true, /*IsReinject*/ true); ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true); } } void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } bool MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); return true; } return false; } bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) { ParseCXX11Attributes(attrs, endLoc); return true; } return false; } void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName, CachedTokens &OpenMPTokens); void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs, CachedTokens &OpenMPTokens, SourceLocation *EndLoc = nullptr); void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr) { CachedTokens OpenMPTokens; ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc); ReplayOpenMPAttributeTokens(OpenMPTokens); } void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
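  /// (For example, this parses the '("message")' in '[[deprecated("message")]]'
  /// and the '(16)' in '[[gnu::aligned(16)]]'.)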
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, CachedTokens &OpenMPTokens); IdentifierInfo *TryParseCXX11AttributeIdentifier( SourceLocation &Loc, Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None, const IdentifierInfo *EnclosingScope = nullptr); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) { ParseMicrosoftDeclSpecs(Attrs, End); return true; } return false; } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); 
void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); void ParsePtrauthQualifier(ParsedAttributes &Attrs); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; bool isClassCompatibleKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. 
CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributesWithRange &Attrs, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. 
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse an 'append_args' clause for '#pragma omp declare variant'. bool parseOpenMPAppendArgs( SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes); /// Parse a `match` clause for an '#pragma omp declare variant'. Return true /// if there was an error. bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI, OMPTraitInfo *ParentTI); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse 'omp [begin] assume[s]' directive. void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parse 'omp end assumes' directive. void ParseOpenMPEndAssumesDirective(SourceLocation Loc); /// Parse clauses for '#pragma omp [begin] declare target'. void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind, OpenMPDirectiveKind EndDKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. 
/// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses the 'sizes' clause of a '#pragma omp tile' directive. OMPClause *ParseOpenMPSizesClause(); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); /// Parses clause with an interop variable of kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. // OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); 
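  // Note on the template-argument helpers above:
  // ParseGreaterThanInTemplateList exists because a single '>>' (or '>>=',
  // '>=') token may need to be split so that each '>' can close its own
  // template argument list, e.g.
  //
  //   std::vector<std::vector<int>> v;   // '>>' closes two template-ids
  //
  // C++98 required the space ('> >'); from C++11 on the parser splits the
  // token itself.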
//===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; }; inline bool isInline() const { return Qualifiers & AQ_inline; }; inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
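// A minimal standalone sketch of the bit-flag pattern GNUAsmQualifiers uses:
// each qualifier is one bit, so a duplicate (e.g. "asm volatile volatile")
// is detected by testing the bit before setting it. The enum values mirror
// the class above; the type and function names here are illustrative only
// and are not clang API.
#include <stdbool.h>

enum aq_sketch { AQS_VOLATILE = 1, AQS_INLINE = 2, AQS_GOTO = 4 };

static bool set_asm_qualifier_sketch(unsigned *quals, enum aq_sketch q)
{
  if (*quals & q)
    return true;   /* already set: report the duplicate to the caller */
  *quals |= q;
  return false;    /* newly recorded */
}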
timing.c
/*---------------------------------------------------------------------------------
  TIMING.C - Performance timing and reporting
---------------------------------------------------------------------------------*/

#include "decs.h"

static double timers[NUM_TIMERS];
static double times[NUM_TIMERS];
static int nstep_start = 0;

void time_init()
{
  for (int n = 0; n < NUM_TIMERS; n++) {
    times[n] = 0.;
  }
  nstep_start = nstep;
}

inline void timer_start(int timerCode)
{
  if (TIMERS || timerCode == TIMER_ALL) {
#pragma omp master
    {
      timers[timerCode] = omp_get_wtime();
    }
  }
}

inline void timer_stop(int timerCode)
{
  if (TIMERS || timerCode == TIMER_ALL) {
#pragma omp master
    {
      times[timerCode] += (omp_get_wtime() - timers[timerCode]);
    }
  }
}

// Report per-step average timings and each stage's share of total step time
void report_performance()
{
  int steps = nstep - nstep_start;

#if TIMERS
  fprintf(stdout, "\n********** PERFORMANCE **********\n");
  fprintf(stdout, "   RECON: %8.4g s (%.4g %%)\n",
      times[TIMER_RECON]/steps, 100.*times[TIMER_RECON]/times[TIMER_ALL]);
  fprintf(stdout, "   LR_TO_F: %8.4g s (%.4g %%)\n",
      times[TIMER_LR_TO_F]/steps, 100.*times[TIMER_LR_TO_F]/times[TIMER_ALL]);
  fprintf(stdout, "   CMAX: %8.4g s (%.4g %%)\n",
      times[TIMER_CMAX]/steps, 100.*times[TIMER_CMAX]/times[TIMER_ALL]);
  fprintf(stdout, "   FLUX_CT: %8.4g s (%.4g %%)\n",
      times[TIMER_FLUX_CT]/steps, 100.*times[TIMER_FLUX_CT]/times[TIMER_ALL]);
  fprintf(stdout, "   UPDATE_U: %8.4g s (%.4g %%)\n",
      times[TIMER_UPDATE_U]/steps, 100.*times[TIMER_UPDATE_U]/times[TIMER_ALL]);
  fprintf(stdout, "   U_TO_P: %8.4g s (%.4g %%)\n",
      times[TIMER_U_TO_P]/steps, 100.*times[TIMER_U_TO_P]/times[TIMER_ALL]);
  fprintf(stdout, "   FIXUP: %8.4g s (%.4g %%)\n",
      times[TIMER_FIXUP]/steps, 100.*times[TIMER_FIXUP]/times[TIMER_ALL]);
  fprintf(stdout, "   BOUND: %8.4g s (%.4g %%)\n",
      times[TIMER_BOUND]/steps, 100.*times[TIMER_BOUND]/times[TIMER_ALL]);
  fprintf(stdout, "   BOUND_COMMS: %8.4g s (%.4g %%)\n",
      times[TIMER_BOUND_COMMS]/steps, 100.*times[TIMER_BOUND_COMMS]/times[TIMER_ALL]);
  fprintf(stdout, "   DIAG: %8.4g s (%.4g %%)\n",
      times[TIMER_DIAG]/steps, 100.*times[TIMER_DIAG]/times[TIMER_ALL]);
  fprintf(stdout, "   IO: %8.4g s (%.4g %%)\n",
      times[TIMER_IO]/steps, 100.*times[TIMER_IO]/times[TIMER_ALL]);
  fprintf(stdout, "   RESTART: %8.4g s (%.4g %%)\n",
      times[TIMER_RESTART]/steps, 100.*times[TIMER_RESTART]/times[TIMER_ALL]);
  fprintf(stdout, "   CURRENT: %8.4g s (%.4g %%)\n",
      times[TIMER_CURRENT]/steps, 100.*times[TIMER_CURRENT]/times[TIMER_ALL]);
  fprintf(stdout, "   LR_STATE: %8.4g s (%.4g %%)\n",
      times[TIMER_LR_STATE]/steps, 100.*times[TIMER_LR_STATE]/times[TIMER_ALL]);
  fprintf(stdout, "   LR_PTOF: %8.4g s (%.4g %%)\n",
      times[TIMER_LR_PTOF]/steps, 100.*times[TIMER_LR_PTOF]/times[TIMER_ALL]);
  fprintf(stdout, "   LR_VCHAR: %8.4g s (%.4g %%)\n",
      times[TIMER_LR_VCHAR]/steps, 100.*times[TIMER_LR_VCHAR]/times[TIMER_ALL]);
  fprintf(stdout, "   LR_CMAX: %8.4g s (%.4g %%)\n",
      times[TIMER_LR_CMAX]/steps, 100.*times[TIMER_LR_CMAX]/times[TIMER_ALL]);
  fprintf(stdout, "   LR_FLUX: %8.4g s (%.4g %%)\n",
      times[TIMER_LR_FLUX]/steps, 100.*times[TIMER_LR_FLUX]/times[TIMER_ALL]);
#if ELECTRONS
  fprintf(stdout, "   E_HEAT: %8.4g s (%.4g %%)\n",
      times[TIMER_ELECTRON_HEAT]/steps, 100.*times[TIMER_ELECTRON_HEAT]/times[TIMER_ALL]);
  fprintf(stdout, "   E_FIXUP: %8.4g s (%.4g %%)\n",
      times[TIMER_ELECTRON_FIXUP]/steps, 100.*times[TIMER_ELECTRON_FIXUP]/times[TIMER_ALL]);
#endif
#endif

  fprintf(stdout, "   ALL: %8.4g s\n", times[TIMER_ALL]/steps);

  fprintf(stdout, "   ZONE CYCLES PER\n");
  fprintf(stdout, "     CORE-SECOND: %e\n", N1*N2/(times[TIMER_ALL]*nthreads/steps));
  fprintf(stdout, "     SECOND: %e\n", N1*N2/(times[TIMER_ALL]/steps));
}
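/* A minimal usage sketch (not part of the original file): timer_start() and
   timer_stop() are meant to bracket each stage of the step loop, with
   TIMER_ALL wrapping the whole step so the per-stage percentages printed by
   report_performance() are taken against it. The stage bodies below are
   placeholders and the function name is hypothetical; TIMER_RECON,
   TIMER_BOUND, and TIMER_ALL are the codes used above. */
void step_timing_sketch()
{
  timer_start(TIMER_ALL);

  timer_start(TIMER_RECON);
  /* ... reconstruction work ... */
  timer_stop(TIMER_RECON);

  timer_start(TIMER_BOUND);
  /* ... boundary conditions ... */
  timer_stop(TIMER_BOUND);

  timer_stop(TIMER_ALL);
}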
GB_unaryop__lnot_int16_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int16_uint8 // op(A') function: GB_tran__lnot_int16_uint8 // C type: int16_t // A type: uint8_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int16_uint8 ( int16_t *Cx, // Cx and Ax may be aliased uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
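// Reference sketch (not generated code): with the macros above expanded, one
// iteration of GB_unop__lnot_int16_uint8 reduces to the scalar loop below --
// cast the uint8_t entry to int16_t, then apply logical NOT. The function
// name is illustrative.
#include <stdint.h>

static inline void lnot_int16_uint8_reference
(
    int16_t *Cx,
    const uint8_t *Ax,
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int16_t z = (int16_t) Ax [p] ;      // cast: int16_t z = (int16_t) aij
        Cx [p] = !(z != 0) ;                // unaryop: cij = !(aij != 0)
    }
}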
GB_unop__identity_int16_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int16_fp32) // op(A') function: GB (_unop_tran__identity_int16_fp32) // C type: int16_t // A type: float // cast: int16_t cij = GB_cast_to_int16_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int16_t z = GB_cast_to_int16_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = GB_cast_to_int16_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int16_fp32) ( int16_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; int16_t z = GB_cast_to_int16_t ((double) (aij)) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; int16_t z = GB_cast_to_int16_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int16_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
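// GB_cast_to_int16_t is defined in the library's casting support, not in
// this generated file. A plausible implementation, assuming GraphBLAS's
// typecast rules (NaN maps to zero, out-of-range values saturate at the
// integer limits), is sketched below for reference only; consult the
// library's casting headers for the authoritative definition.
#include <math.h>
#include <stdint.h>

static inline int16_t cast_to_int16_sketch (double x)
{
    if (isnan (x)) return (0) ;                         // NaN -> 0
    if (x <= (double) INT16_MIN) return (INT16_MIN) ;   // clamp low
    if (x >= (double) INT16_MAX) return (INT16_MAX) ;   // clamp high
    return ((int16_t) x) ;
}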
triplet_iw.c
/* Copyright (C) 2016 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stddef.h> #include <math.h> #include <phonoc_utils.h> #include <triplet_h/triplet.h> #include <triplet_h/triplet_iw.h> #include <tetrahedron_method.h> static void set_freq_vertices(double freq_vertices[3][24][4], const double *frequencies1, const double *frequencies2, TPLCONST size_t vertices[2][24][4], const int num_band1, const int num_band2, const int b1, const int b2, const size_t tp_type); static int set_g(double g[3], const double f0, TPLCONST double freq_vertices[3][24][4], const size_t max_i); static int in_tetrahedra(const double f0, TPLCONST double freq_vertices[24][4]); static void get_triplet_tetrahedra_vertices( size_t vertices[2][24][4], TPLCONST int tp_relative_grid_address[2][24][4][3], const int mesh[3], const size_t triplet[3], TPLCONST int (*bz_grid_address)[3], const size_t *bz_map); void tpi_get_integration_weight(double *iw, char *iw_zero, const double *frequency_points, const size_t num_band0, TPLCONST int tp_relative_grid_address[2][24][4][3], const int mesh[3], const size_t triplets[3], const size_t num_triplets, TPLCONST int (*bz_grid_address)[3], const size_t *bz_map, const double *frequencies1, const size_t num_band1, const double *frequencies2, const size_t num_band2, const size_t tp_type, const int openmp_per_bands) { size_t max_i, j, b1, b2, b12, num_band_prod, adrs_shift; size_t vertices[2][24][4]; double g[3]; double freq_vertices[3][24][4]; get_triplet_tetrahedra_vertices(vertices, tp_relative_grid_address, mesh, triplets, bz_grid_address, bz_map); num_band_prod = num_triplets * num_band0 * num_band1 * num_band2; /* tp_type: Type of integration weights stored */ /* */ /* g0 -> \delta(f0 - (-f1 + f2)) */ /* g1 -> \delta(f0 - (f1 - f2)) */ /* g2 -> \delta(f0 - (f1 + f2)) */ /* */ /* tp_type = 2: (g[2], g[0] - g[1]) mainly for ph-ph */ /* tp_type = 3: (g[2], g[0] - g[1], g[0] + g[1] + g[2]) mainly for ph-ph */ /* 
tp_type = 4: (g[0]) mainly for el-ph phonon decay, */
  /*              f0: ph, f1: el_i, f2: el_f */

  max_i = 0;  /* default, so max_i is never read uninitialized below */
  if ((tp_type == 2) || (tp_type == 3)) {
    max_i = 3;
  }
  if (tp_type == 4) {
    max_i = 1;
  }

#pragma omp parallel for private(j, b1, b2, adrs_shift, g, freq_vertices) if (openmp_per_bands)
  for (b12 = 0; b12 < num_band1 * num_band2; b12++) {
    b1 = b12 / num_band2;
    b2 = b12 % num_band2;
    set_freq_vertices(freq_vertices, frequencies1, frequencies2, vertices,
                      num_band1, num_band2, b1, b2, tp_type);
    for (j = 0; j < num_band0; j++) {
      adrs_shift = j * num_band1 * num_band2 + b1 * num_band2 + b2;
      iw_zero[adrs_shift] = set_g(g, frequency_points[j], freq_vertices, max_i);
      if (tp_type == 2) {
        iw[adrs_shift] = g[2];
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] - g[1];
      }
      if (tp_type == 3) {
        iw[adrs_shift] = g[2];
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] - g[1];
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] + g[1] + g[2];
      }
      if (tp_type == 4) {
        iw[adrs_shift] = g[0];
      }
    }
  }
}

void tpi_get_integration_weight_with_sigma(double *iw,
                                           char *iw_zero,
                                           const double sigma,
                                           const double cutoff,
                                           const double *frequency_points,
                                           const size_t num_band0,
                                           const size_t triplet[3],
                                           const size_t const_adrs_shift,
                                           const double *frequencies,
                                           const size_t num_band,
                                           const size_t tp_type,
                                           const int openmp_per_bands)
{
  size_t j, b12, b1, b2, adrs_shift;
  double f0, f1, f2, g0, g1, g2;

#pragma omp parallel for private(j, b1, b2, f0, f1, f2, g0, g1, g2, adrs_shift) if (openmp_per_bands)
  for (b12 = 0; b12 < num_band * num_band; b12++) {
    b1 = b12 / num_band;
    b2 = b12 % num_band;
    f1 = frequencies[triplet[1] * num_band + b1];
    f2 = frequencies[triplet[2] * num_band + b2];
    for (j = 0; j < num_band0; j++) {
      f0 = frequency_points[j];
      adrs_shift = j * num_band * num_band + b1 * num_band + b2;
      if ((tp_type == 2) || (tp_type == 3)) {
        if (cutoff > 0 &&
            fabs(f0 + f1 - f2) > cutoff &&
            fabs(f0 - f1 + f2) > cutoff &&
            fabs(f0 - f1 - f2) > cutoff) {
          iw_zero[adrs_shift] = 1;
          g0 = 0;
          g1 = 0;
          g2 = 0;
        } else {
          iw_zero[adrs_shift] = 0;
          g0 = gaussian(f0 + f1 - f2, sigma);
          g1 = gaussian(f0 - f1 + f2, sigma);
          g2 = gaussian(f0 - f1 - f2, sigma);
        }
        if (tp_type == 2) {
          iw[adrs_shift] = g2;
          adrs_shift += const_adrs_shift;
          iw[adrs_shift] = g0 - g1;
        }
        if (tp_type == 3) {
          iw[adrs_shift] = g2;
          adrs_shift += const_adrs_shift;
          iw[adrs_shift] = g0 - g1;
          adrs_shift += const_adrs_shift;
          iw[adrs_shift] = g0 + g1 + g2;
        }
      }
      if (tp_type == 4) {
        if (cutoff > 0 && fabs(f0 + f1 - f2) > cutoff) {
          iw_zero[adrs_shift] = 1;
          iw[adrs_shift] = 0;
        } else {
          iw_zero[adrs_shift] = 0;
          iw[adrs_shift] = gaussian(f0 + f1 - f2, sigma);
        }
      }
    }
  }
}

static void set_freq_vertices(double freq_vertices[3][24][4],
                              const double *frequencies1,
                              const double *frequencies2,
                              TPLCONST size_t vertices[2][24][4],
                              const int num_band1,
                              const int num_band2,
                              const int b1,
                              const int b2,
                              const size_t tp_type)
{
  int i, j;
  double f1, f2;

  for (i = 0; i < 24; i++) {
    for (j = 0; j < 4; j++) {
      f1 = frequencies1[vertices[0][i][j] * num_band1 + b1];
      f2 = frequencies2[vertices[1][i][j] * num_band2 + b2];
      if ((tp_type == 2) || (tp_type == 3)) {
        if (f1 < 0) {f1 = 0;}
        if (f2 < 0) {f2 = 0;}
        freq_vertices[1][i][j] = f1 - f2;
        freq_vertices[2][i][j] = f1 + f2;
      }
      freq_vertices[0][i][j] = -f1 + f2;
    }
  }
}

static int set_g(double g[3],
                 const double f0,
                 TPLCONST double freq_vertices[3][24][4],
                 const size_t max_i)
{
  size_t i;
  int iw_zero;

  iw_zero = 1;

  for (i = 0; i < max_i; i++) {
    if (in_tetrahedra(f0, freq_vertices[i])) {
      g[i] = thm_get_integration_weight(f0, freq_vertices[i], 'I');
      iw_zero = 0;
    } else {
      g[i] = 0;
    }
  }

  return iw_zero;
}

static int
in_tetrahedra(const double f0, TPLCONST double freq_vertices[24][4]) { int i, j; double fmin, fmax; fmin = freq_vertices[0][0]; fmax = freq_vertices[0][0]; for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { if (fmin > freq_vertices[i][j]) { fmin = freq_vertices[i][j]; } if (fmax < freq_vertices[i][j]) { fmax = freq_vertices[i][j]; } } } if (fmin > f0 || fmax < f0) { return 0; } else { return 1; } } static void get_triplet_tetrahedra_vertices( size_t vertices[2][24][4], TPLCONST int tp_relative_grid_address[2][24][4][3], const int mesh[3], const size_t triplet[3], TPLCONST int (*bz_grid_address)[3], const size_t *bz_map) { int i, j; for (i = 0; i < 2; i++) { for (j = 0; j < 24; j++) { thm_get_dense_neighboring_grid_points(vertices[i][j], triplet[i + 1], tp_relative_grid_address[i][j], 4, mesh, bz_grid_address, bz_map); } } }
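/* gaussian() is declared in phonoc_utils.h; the sigma-smearing path above
   assumes it is the normalized Gaussian that approximates a delta function
   as sigma -> 0. A reference version under that assumption (the function
   name here is illustrative): */
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static double gaussian_sketch(const double x, const double sigma)
{
  /* (1 / (sqrt(2 pi) sigma)) * exp(-x^2 / (2 sigma^2)) */
  return exp(-x * x / (2 * sigma * sigma)) / (sqrt(2 * M_PI) * sigma);
}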
convolution_3x3_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt) { #if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt); conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(kernel, kernel_tm_pack8to1, inch, outch, opt); return; } #endif #if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt); conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(kernel, kernel_tm_pack8to1, inch, outch, opt); return; } #endif #if NCNN_XOP && __SSE2__ && !__XOP__ if (ncnn::cpu_support_x86_xop()) { extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt); conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(kernel, kernel_tm_pack8to1, inch, outch, opt); return; } #endif // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch, (size_t)2u); const short ktm[6][3] = { {6, 0, 0}, {-4, -4, -4}, {-4, 4, -4}, {1, 2, 4}, {1, -2, 4}, {0, 0, 6} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { short* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-8a-inch/8a-36-outch/4b kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4); int p = 0; for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); Mat g0 = kernel_tm_pack8to1.channel(p / 4); for (int k = 0; k < 36; k++) { short* g00 = g0.row<short>(k); for (int q = 0; q + 7 < inch; q += 8) { #if __AVXVNNI__ || __AVX512VNNI__ || __XOP__ 
for (int i = 0; i < 4; i++) { const short* k00 = k0.row<const short>(q + i * 2); const short* k10 = k1.row<const short>(q + i * 2); const short* k20 = k2.row<const short>(q + i * 2); const short* k30 = k3.row<const short>(q + i * 2); const short* k01 = k0.row<const short>(q + i * 2 + 1); const short* k11 = k1.row<const short>(q + i * 2 + 1); const short* k21 = k2.row<const short>(q + i * 2 + 1); const short* k31 = k3.row<const short>(q + i * 2 + 1); g00[0] = k00[k]; g00[1] = k01[k]; g00[2] = k10[k]; g00[3] = k11[k]; g00[4] = k20[k]; g00[5] = k21[k]; g00[6] = k30[k]; g00[7] = k31[k]; g00 += 8; } #else for (int i = 0; i < 8; i++) { g00[0] = k0.row<const short>(q + i)[k]; g00[1] = k1.row<const short>(q + i)[k]; g00[2] = k2.row<const short>(q + i)[k]; g00[3] = k3.row<const short>(q + i)[k]; g00 += 4; } #endif } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4); for (int k = 0; k < 36; k++) { short* g00 = g0.row<short>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = k0.row<const short>(q + i)[k]; g00 += 1; } } } } } static void conv3x3s1_winograd42_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt) { #if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__ if (ncnn::cpu_support_x86_avx512_vnni()) { extern void conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__ if (ncnn::cpu_support_x86_avx_vnni()) { extern void conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt); return; } #endif #if NCNN_XOP && __SSE2__ && !__XOP__ if (ncnn::cpu_support_x86_xop()) { extern void conv3x3s1_winograd42_pack8to1_int8_sse_xop(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); conv3x3s1_winograd42_pack8to1_int8_sse_xop(bottom_blob, top_blob, kernel_tm, opt); return; } #endif int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; // size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = 
bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { // TODO use _mm_cvtepi8_epi16 on sse4.1 __m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0); __m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _r04_05 = _mm_loadu_si128((const __m128i*)(r0 + 32)); __m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01); __m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03); __m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05); __m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001); __m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001); __m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203); __m128i _r03 = _mm_unpackhi_epi8(_r02_03, _extr0203); __m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405); __m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405); __m128i _v5 = _mm_set1_epi16(5); __m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5)); __m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2)); __m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2)); __m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp5m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5)); _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m); _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m); _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m); _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m); _mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m); _mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]); __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]); __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]); __m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]); __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]); __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]); __m128i _v5 = _mm_set1_epi16(5); __m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5)); __m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5)); _mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0); _mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1); _mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2); _mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3); 
_mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4); _mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __AVX2__ if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { short* tmpptr = tm2.row<short>(i / 4); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256i _r0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); _mm256_storeu_si256((__m256i*)tmpptr, _r0); _mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 32; } } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2); #else short* tmpptr = tm2.row<short>(i / 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); __m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); _mm_storeu_si128((__m128i*)tmpptr, _r0); _mm_storeu_si128((__m128i*)(tmpptr + 8), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 16; } } for (; i < tiles; i++) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2); #else short* tmpptr = tm2.row<short>(i / 2 + i % 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); _mm_storeu_si128((__m128i*)tmpptr, _r0); r0 += bottom_blob_tm.cstep * 8; tmpptr += 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); const Mat kernel0_tm = kernel_tm.channel(p / 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); __m256i _sum4_5 = 
_mm256_setzero_si256(); __m256i _sum6_7 = _mm256_setzero_si256(); for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef); #else // 0 0 1 1 2 2 3 3 8 8 9 9 a a b b // 4 4 5 5 6 6 7 7 c c d d e e f f __m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0); __m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0); __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val0_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif __m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef); #else __m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1); __m256i _val1_4567_cdef = 
_mm256_unpackhi_epi16(_val1, _val1); __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123); __m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123); __m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab); __m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab); __m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567); __m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567); __m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef); __m256i _sh16_17 = _mm256_mulhi_epi16(_w23, _val1_cdef); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17)); #endif r0 += 32; k0 += 32; } __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); __m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1)); _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7); int sum[16]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); _mm256_storeu_si256((__m256i*)(sum + 8), _sum4_6); output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm[2] = sum[8]; output1_tm[2] = sum[9]; output2_tm[2] = sum[10]; output3_tm[2] = sum[11]; output0_tm[3] = sum[12]; output1_tm[3] = sum[13]; output2_tm[3] = sum[14]; output3_tm[3] = sum[15]; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { #if __AVX2__ // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val_4567 = 
_mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef); #else __m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val); __m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val); __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif #else // 0 1 2 3 4 5 6 7 __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val0_01 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val0_23 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val0_45 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val0_67 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(3, 3, 3, 3)); __m128i _val1_01 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val1_23 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val1_45 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val1_67 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val0_01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val0_23, _w1, _sum1); _sum2 = _mm_maddd_epi16(_val1_01, _w0, _sum2); _sum3 = _mm_maddd_epi16(_val1_23, _w1, _sum3); _sum0 = _mm_maddd_epi16(_val0_45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val0_67, _w3, _sum1); _sum2 = _mm_maddd_epi16(_val1_45, _w2, _sum2); _sum3 = _mm_maddd_epi16(_val1_67, _w3, _sum3); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val0_0123 = _mm_unpacklo_epi16(_val0, 
_val0); __m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0); __m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1); __m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1); __m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123); __m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123); __m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567); __m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567); __m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123); __m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123); __m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567); __m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567); __m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01); __m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01); __m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01); __m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01); __m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23); __m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23); __m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23); __m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23); __m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45); __m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45); __m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45); __m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45); __m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67); __m128i _sh03 = _mm_mulhi_epi16(_w3, _val0_67); __m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67); __m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13)); #endif #endif r0 += 16; k0 += 32; } #if __AVX2__ __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); int sum[8]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); #else _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); int sum[8]; _mm_storeu_si128((__m128i*)sum, _sum0); _mm_storeu_si128((__m128i*)(sum + 4), _sum2); #endif output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm += 2; output1_tm += 2; output2_tm += 2; output3_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = 
_mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 __m128i _val = _mm_loadu_si128((const __m128i*)r0); #if __AVX2__ __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ // 0 1 0 1 x x x x // 0 1 0 1 0 1 0 1 __m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); __m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1); __m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val)); __m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val)); _val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); _val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); #endif #else __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val23, _w1, _sum1); _sum0 = _mm_maddd_epi16(_val45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val67, _w3, _sum1); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val_0123 = _mm_unpacklo_epi16(_val, _val); __m128i _val_4567 = _mm_unpackhi_epi16(_val, _val); __m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123); __m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123); __m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567); __m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567); __m128i _sl0 = _mm_mullo_epi16(_w0, _val01); __m128i _sh0 = _mm_mulhi_epi16(_w0, _val01); __m128i _sl1 = _mm_mullo_epi16(_w1, _val23); __m128i _sh1 = _mm_mulhi_epi16(_w1, _val23); __m128i _sl2 = _mm_mullo_epi16(_w2, _val45); __m128i _sh2 = _mm_mulhi_epi16(_w2, _val45); __m128i _sl3 = _mm_mullo_epi16(_w3, _val67); __m128i _sh3 = _mm_mulhi_epi16(_w3, _val67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1)); _sum1 = 
_mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3)); #endif #endif r0 += 8; k0 += 32; } #if __AVX2__ __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0); __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1); #endif _sum0 = _mm_add_epi32(_sum0, _sum1); int sum[4]; _mm_storeu_si128((__m128i*)sum, _sum0); output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); __m128i _sum4 = _mm_setzero_si128(); __m128i _sum5 = _mm_setzero_si128(); __m128i _sum6 = _mm_setzero_si128(); __m128i _sum7 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _val2 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _val3 = _mm_loadu_si128((const __m128i*)(r0 + 24)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); __m128i _sl2 = _mm_mullo_epi16(_val2, _w0); __m128i _sh2 = _mm_mulhi_epi16(_val2, _w0); __m128i _sl3 = _mm_mullo_epi16(_val3, _w0); __m128i _sh3 = _mm_mulhi_epi16(_val3, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); _sum4 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2)); _sum5 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2)); _sum6 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl3, _sh3)); _sum7 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3)); k0 += 8; r0 += 16; } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _sum4 = _mm_add_epi32(_sum4, _sum5); _sum6 = _mm_add_epi32(_sum6, _sum7); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm[2] = _mm_reduce_add_epi32(_sum4); output0_tm[3] = _mm_reduce_add_epi32(_sum6); output0_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = 
_mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); k0 += 8; r0 += 16; } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val = _mm_loadu_si128((const __m128i*)r0); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); k0 += 8; r0 += 8; } _sum0 = _mm_add_epi32(_sum0, _sum1); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1; const int* output0_tm_1 = output0_tm_0 + tiles * 1; const int* output0_tm_2 = output0_tm_0 + tiles * 2; const int* output0_tm_3 = output0_tm_0 + tiles * 3; const int* output0_tm_4 = output0_tm_0 + tiles * 4; const int* output0_tm_5 = output0_tm_0 + tiles * 5; int* output0 = out0.row<int>(i * 4) + j * 4; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 // TODO sse optimize for (int m = 0; m < 5; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b; tmp[1][m] = tmp13a + tmp13b * 2; tmp[2][m] = tmp02a + tmp02b * 4; tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; 
output0_tm_5 += tiles * 6;
                }
                for (int m = 5; m < 6; m++)
                {
                    int tmp02a = output0_tm_1[0] + output0_tm_2[0];
                    int tmp13a = output0_tm_1[0] - output0_tm_2[0];
                    int tmp02b = output0_tm_3[0] + output0_tm_4[0];
                    int tmp13b = output0_tm_3[0] - output0_tm_4[0];

                    tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
                    tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
                    tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
                    tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;

                    output0_tm_0 += tiles * 6;
                    output0_tm_1 += tiles * 6;
                    output0_tm_2 += tiles * 6;
                    output0_tm_3 += tiles * 6;
                    output0_tm_4 += tiles * 6;
                    output0_tm_5 += tiles * 6;
                }
                for (int m = 0; m < 4; m++)
                {
                    const int* tmp0 = tmp[m];

                    int tmp02a = tmp0[1] + tmp0[2];
                    int tmp13a = tmp0[1] - tmp0[2];
                    int tmp02b = tmp0[3] + tmp0[4];
                    int tmp13b = tmp0[3] - tmp0[4];

                    output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
                    output0[1] = (tmp13a + tmp13b * 2) / 576;
                    output0[2] = (tmp02a + tmp02b * 4) / 576;
                    output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;

                    output0 += outw;
                }
            }
        }
    }
}
// END transform output

// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
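/*
 * A scalar reference (hypothetical helper, not part of the original file) for
 * auditing the inverse Winograd F(4x4, 3x3) output transform implemented
 * above.  It mirrors the integer arithmetic of the scalar tail exactly: the
 * row pass applies the transposed output-transform matrix with the last
 * column carrying an extra factor of 4, and the column pass divides by
 * 576 (= 24 * 24) to undo the integer scaling of the transform pair, just as
 * the code above does.
 */
void winograd43_output_transform_ref(const int in[6][6], int out[4][4])
{
    int tmp[4][6];
    for (int m = 0; m < 6; m++)
    {
        // in[k][m] plays the role of output0_tm_k[0] at column m above
        int r0 = in[0][m];
        int r1 = in[1][m];
        int r2 = in[2][m];
        int r3 = in[3][m];
        int r4 = in[4][m];
        int r5 = in[5][m];

        int t02a = r1 + r2;
        int t13a = r1 - r2;
        int t02b = r3 + r4;
        int t13b = r3 - r4;

        int s = (m == 5) ? 4 : 1; // the m == 5 branch above scales by 4

        tmp[0][m] = (r0 + t02a + t02b) * s;
        tmp[1][m] = (t13a + t13b * 2) * s;
        tmp[2][m] = (t02a + t02b * 4) * s;
        tmp[3][m] = (r5 * 4 + t13a + t13b * 8) * s;
    }
    for (int m = 0; m < 4; m++)
    {
        const int* t = tmp[m];

        int t02a = t[1] + t[2];
        int t13a = t[1] - t[2];
        int t02b = t[3] + t[4];
        int t13b = t[3] - t[4];

        out[m][0] = (t[0] + t02a + t02b) / 576;
        out[m][1] = (t13a + t13b * 2) / 576;
        out[m][2] = (t02a + t02b * 4) / 576;
        out[m][3] = (t[5] + t13a + t13b * 8) / 576;
    }
}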
Example_tasking.5.c
/*
 * @@name:       tasking.5c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   no
 * @@expect:     success
 * @@version:    omp_3.0
 */
#define LARGE_NUMBER 10000000
double item[LARGE_NUMBER];
extern void process(double);

int main()
{
  #pragma omp parallel
  {
    #pragma omp single
    {
      int i;
      for (i=0; i<LARGE_NUMBER; i++)
        #pragma omp task // i is firstprivate, item is shared
        process(item[i]);
    }
  }
}
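/*
 * A hedged alternative (assumes an OpenMP 4.5 compiler; not part of the
 * examples document): taskloop expresses the same single-producer pattern
 * and lets the runtime batch iterations into fewer tasks instead of
 * creating one task per array element.
 */
#define LARGE_NUMBER 10000000
double item[LARGE_NUMBER];
extern void process(double);

int main()
{
  #pragma omp parallel
  #pragma omp single
  {
    int i;
    /* grainsize(1024) bundles roughly 1024 iterations into each task */
    #pragma omp taskloop grainsize(1024)
    for (i = 0; i < LARGE_NUMBER; i++)
      process(item[i]);
  }
}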
gameoflife.c
#include <endian.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <setjmp.h> #include <string.h> #include <stdbool.h> #include <math.h> #include <sys/time.h> #include <omp.h> #define calcIndex(width, x,y) ((y)*(width) + (x)) long TimeSteps = 100; void writeParallelVTK(long timestep, int w, int h, int px, int py) { int nx = w / px; int ny = h / py; char filename[2048]; snprintf(filename, sizeof(filename), "%s-%05ld%s", "parallel", timestep, ".pvti"); FILE *fp = fopen(filename, "w"); fprintf(fp, "<?xml version=\"1.0\"?>\n"); fprintf(fp, "<VTKFile type=\"PImageData\" version=\"0.1\" byte_order=\"LittleEndian\" header_type=\"UInt64\">\n"); fprintf(fp, "<PImageData WholeExtent=\"%d %d %d %d %d %d\" Origin=\"0 0 0\" Spacing=\"%le %le %le\">\n", 0, w, 0, h, 0, 0, 1.0, 1.0, 0.0); fprintf(fp, "<PCellData Scalars=\"%s\">\n", "gol"); fprintf(fp, "<PDataArray type=\"Float32\" Name=\"%s\" format=\"appended\" offset=\"0\"/>\n", "gol"); fprintf(fp, "</PCellData>\n"); for (int i = 0; i < nx * ny; i++) { int xOffset = (i % nx) * px; int yOffset = (i / nx) * py; fprintf(fp, "<Piece Extent=\"%d %d %d %d %d %d\" Source=\"%s-%d-%05ld%s\"/>", xOffset, xOffset + px, yOffset, yOffset + py, 0, 0, "gol", i, timestep, ".vti"); } fprintf(fp, "</PImageData>\n"); fprintf(fp, "</VTKFile>\n"); fclose(fp); } void writeVTK2(long timestep, double *data, char prefix[1024], int w, int h, int Gw, int offsetX, int offsetY, int num) { char filename[2048]; int x,y; //int offsetX=0; //int offsetY=0; float deltax=1.0; long nxy = w * h * sizeof(float); snprintf(filename, sizeof(filename), "%s-%d-%05ld%s", prefix, num, timestep, ".vti"); FILE* fp = fopen(filename, "w"); fprintf(fp, "<?xml version=\"1.0\"?>\n"); fprintf(fp, "<VTKFile type=\"ImageData\" version=\"0.1\" byte_order=\"LittleEndian\" header_type=\"UInt64\">\n"); fprintf(fp, "<ImageData WholeExtent=\"%d %d %d %d %d %d\" Origin=\"0 0 0\" Spacing=\"%le %le %le\">\n", offsetX, offsetX + w, offsetY, offsetY + h, 0, 0, deltax, deltax, 0.0); fprintf(fp, "<CellData Scalars=\"%s\">\n", prefix); fprintf(fp, "<DataArray type=\"Float32\" Name=\"%s\" format=\"appended\" offset=\"0\"/>\n", prefix); fprintf(fp, "</CellData>\n"); fprintf(fp, "</ImageData>\n"); fprintf(fp, "<AppendedData encoding=\"raw\">\n"); fprintf(fp, "_"); fwrite((unsigned char*)&nxy, sizeof(long), 1, fp); for (y = offsetY; y < h + offsetY; y++) { for (x = offsetX; x < w + offsetX; x++) { float value = data[calcIndex(Gw, x,y)]; fwrite((unsigned char*)&value, sizeof(float), 1, fp); } } fprintf(fp, "\n</AppendedData>\n"); fprintf(fp, "</VTKFile>\n"); fclose(fp); } void show(double* currentfield, int w, int h) { printf("\033[H"); int x,y; for (y = 0; y < h; y++) { for (x = 0; x < w; x++) printf(currentfield[calcIndex(w, x,y)] ? 
"\033[07m \033[m" : " "); //printf("\033[E"); printf("\n"); } fflush(stdout); } // anpassung void evolve(double* currentfield, double* newfield, int w, int h, int pX, int pY, long t) { writeParallelVTK(t, w, h, pX, pY); #pragma omp parallel num_threads((h/pY) * (w/pX)) { int threadId = omp_get_thread_num(); int nX = w/pX; int nY = h/pY; int rectangleY = (nY-1) - threadId/nX; int rectangleX = threadId % nX; int offsetX = pX * rectangleX; int offsetY = pY * rectangleY; for (int y = offsetY; y < offsetY + pY; y++) { for (int x = offsetX; x < offsetX + pX; x++) { int neighbourCount = 0; for (int y1 = y - 1; y1 <= y + 1; y1++) for (int x1 = x - 1; x1 <= x + 1; x1++) if (x1 >= 0 && x1 < w && y1 >= 0 && y1 < h && (x1 != x || y1 != y) && currentfield[calcIndex(w, x1, y1)]) neighbourCount++; newfield[calcIndex(w, x,y)] = neighbourCount == 3 || currentfield[calcIndex(w, x,y)] && neighbourCount == 2; } } writeVTK2(t,currentfield,"gol", pX, pY, w, offsetX, offsetY, (rectangleY * (w / pX)) + rectangleX); } } void filling(double* currentfield, int w, int h, char *path) { if (path == 0){ int i; for (i = 0; i < h*w; i++) { currentfield[i] = (rand() < RAND_MAX / 10) ? 1 : 0; ///< init domain randomly } return; } FILE *file = fopen(path, "r"); if(file == 0){ printf("File not found"); exit(0); } while(getc(file) == '#'){ while(getc(file) != '\n'); } char c; ///x while(getc(file) != '='); getc(file); int x = 0; while((c = getc(file)) != ','){ x = x * 10 + c - '0'; } ///y while(getc(file) != '='); getc(file); int y = 0; while((c = getc(file)) != ','){ y = y * 10 + c - '0'; } if(x != w || y != h){ fclose(file); printf("Wrong sizes"); exit(0); } while(getc(file) != '\n'); x = 0; y = 0; c = getc(file); while(c != '!'){ while(c != '$' && c != '!'){ int rl = 1; if(c >= '0' && c <= '9'){ rl = 0; while(c != 'b' && c != 'o'){ rl = rl * 10 + c - '0'; c = getc(file); while(c == '\n'){ c = getc(file); } } } for(int index = 0; index < rl; index++) { currentfield[calcIndex(w, x, y)] = c == 'o'; x++; } c = getc(file); if(c == '\n'){ c = getc(file); } } if(c == '$'){ c = getc(file); if(c == '\n'){ c = getc(file); } } x = 0; y++; } } void game(int w, int h, int pX, int pY, char *path) { double *currentfield = calloc(w*h, sizeof(double)); double *newfield = calloc(w*h, sizeof(double)); //printf("size unsigned %d, size long %d\n",sizeof(float), sizeof(long)); filling(currentfield, w, h, path); long t; for (t=0;t<TimeSteps;t++) { //show(currentfield, w, h); evolve(currentfield, newfield, w, h, pX, pY, t); //printf("%ld timestep\n",t); //writeVTK2(t,currentfield,"gol", w, h); //usleep(200000); //SWAP double *temp = currentfield; currentfield = newfield; newfield = temp; } free(currentfield); free(newfield); } int main(int c, char **v) { int nX = 0, nY = 0, pX = 0, pY = 0; char *path = 0; if (c > 1) TimeSteps = atoi(v[1]); ///< read timesteps if (c > 2) nX = atoi(v[2]); ///< read nX if (c > 3) nY = atoi(v[3]); ///< read nY if (c > 4) pX = atoi(v[4]); ///< read pX if (c > 5) pY = atoi(v[5]); ///< read pY if (c > 6) path = v[6]; ///< read path if (nX <= 0) nX = 2; ///< default nX if (nY <= 0) nY = 2; ///< default nY if (pX <= 0) pX = 8; ///< default width if (pY <= 0) pY = 8; ///< default height int w = nX * pX; int h = nY * pY; //printf("Start Game of Life with w=%d h=%d\n", w, h); game(w, h, pX, pY, path); }
omp_parallel_for_lastprivate.c
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel for lastprivate directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for lastprivate</ompts:directive>
<ompts:dependences>omp parallel for reduction,omp parallel for private</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"

int <ompts:testcode:functionname>omp_parallel_for_lastprivate</ompts:testcode:functionname>(FILE * logFile)
{
    int sum = 0;
    /*int sum0 = 0;*/
    int known_sum;
    int i;
    int i0 = -1;

#pragma omp parallel for reduction(+:sum) schedule(static,7) <ompts:check>lastprivate(i0)</ompts:check><ompts:crosscheck>private(i0)</ompts:crosscheck>
    for (i = 1; i <= LOOPCOUNT; i++)
    {
        sum = sum + i;
        i0 = i;
    } /*end of for*/
    /* end of parallel*/

    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    return ((known_sum == sum) && (i0 == LOOPCOUNT));
} /* end of check_parallel_for_lastprivate */
</ompts:testcode>
</ompts:test>
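/*
 * The same check outside the testsuite's template markup -- a standalone
 * sketch with a stand-in LOOPCOUNT (the real value comes from
 * omp_testsuite.h): lastprivate(i0) must hand the value from the
 * sequentially last iteration (i == LOOPCOUNT) back to the enclosing scope.
 */
#include <stdio.h>

#define LOOPCOUNT 1000 /* stand-in for the testsuite's definition */

int main(void)
{
    int sum = 0;
    int known_sum;
    int i;
    int i0 = -1;

#pragma omp parallel for reduction(+:sum) schedule(static,7) lastprivate(i0)
    for (i = 1; i <= LOOPCOUNT; i++)
    {
        sum = sum + i;
        i0 = i;
    }

    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    printf("%s\n", (known_sum == sum && i0 == LOOPCOUNT) ? "pass" : "FAIL");
    return 0;
}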
mm.c
#include <string.h> #include <omp.h> #include "mm.h" #define ALIGNED __attribute__((aligned(64))) inline size_t min2(const size_t a, const size_t b) { return (a < b) ? a : b; } #define MBLOCK 224 #define NBLOCK 48 #define KBLOCK (2 * MBLOCK) inline void mm_serial(const size_t m, const size_t n, const size_t k, const csmat_t a, const size_t lda, const csmat_t b, const size_t ldb, const smat_t c, const size_t ldc) { size_t i, j, p, ibstart, jbstart, pbstart; ALIGNED data_t c_local[NBLOCK][MBLOCK]; #ifdef MIC ALIGNED data_t a_local[KBLOCK][MBLOCK]; for (j = 0; j < n; ++j) { memset(c + CIDX(0, j, ldc), 0, sizeof(data_t) * m); } for (pbstart = 0; pbstart < k; pbstart += KBLOCK) { const size_t pbsize = min2(KBLOCK, k - pbstart); for (ibstart = 0; ibstart < m; ibstart += MBLOCK) { const size_t ibsize = min2(MBLOCK, m - ibstart); const cmat_t ab = a + CIDX(ibstart, pbstart, lda); for (p = 0; p < pbsize; ++p) { memcpy(a_local[p], ab + CIDX(0, p, lda), ibsize * sizeof(data_t)); } for (jbstart = 0; jbstart < n; jbstart += NBLOCK) { const size_t jbsize = min2(NBLOCK, n - jbstart); const cmat_t bb = b + CIDX(pbstart, jbstart, ldb); const mat_t cb = c + CIDX(ibstart, jbstart, ldc); for (j = 0; j < jbsize; ++j) { memset(c_local[j], 0, ibsize * sizeof(data_t)); } for (j = 0; j < jbsize; ++j) { for (p = 0; p < pbsize; ++p) { #pragma omp simd for (i = 0; i < ibsize; ++i) { c_local[j][i] += a_local[p][i] * bb[CIDX(p, j, ldb)]; } } } for (j = 0; j < jbsize; ++j) { #pragma omp simd for (i = 0; i < ibsize; ++i) { cb[CIDX(i, j, ldc)] += c_local[j][i]; } } } } } #else ALIGNED data_t b_local[NBLOCK][KBLOCK]; for (j = 0; j < n; ++j) { memset(c + CIDX(0, j, ldc), 0, sizeof(data_t) * m); } for (pbstart = 0; pbstart < k; pbstart += KBLOCK) { const size_t pbsize = min2(KBLOCK, k - pbstart); for (jbstart = 0; jbstart < n; jbstart += NBLOCK) { const size_t jbsize = min2(NBLOCK, n - jbstart); const cmat_t bb = b + CIDX(pbstart, jbstart, ldb); for (j = 0; j < jbsize; ++j) { memcpy(b_local[j], bb + CIDX(0, j, ldb), pbsize * sizeof(data_t)); } for (ibstart = 0; ibstart < m; ibstart += MBLOCK) { const size_t ibsize = min2(MBLOCK, m - ibstart); const cmat_t ab = a + CIDX(ibstart, pbstart, lda); const mat_t cb = c + CIDX(ibstart, jbstart, ldc); for (j = 0; j < jbsize; ++j) { memset(c_local[j], 0, ibsize * sizeof(data_t)); } for (j = 0; j < jbsize; ++j) { for (p = 0; p < pbsize; ++p) { #pragma omp simd for (i = 0; i < ibsize; ++i) { c_local[j][i] += ab[CIDX(i, p, lda)] * b_local[j][p]; } } } for (j = 0; j < jbsize; ++j) { #pragma omp simd for (i = 0; i < ibsize; ++i) { cb[CIDX(i, j, ldc)] += c_local[j][i]; } } } } } #endif } inline size_t ceiling(const size_t den, const size_t num) { return (den - 1) / num + 1; } static size_t partition_of_rows = 4; static size_t partition_of_columns = 6; void mm_set_partition(const size_t pr, const size_t pc) { omp_set_num_threads(pr * pc); partition_of_rows = pr; partition_of_columns = pc; } void matrix_multiply(const size_t m, const size_t n, const size_t k, const csmat_t a, const size_t lda, const csmat_t b, const size_t ldb, const smat_t c, const size_t ldc) { const size_t pr = partition_of_rows; const size_t pc = partition_of_columns; const size_t rbsize_0 = ceiling(m, pr); const size_t cbsize_0 = ceiling(n, pc); #pragma omp parallel shared(m, n, k, a, b, c, pr, pc, lda, ldb, ldc, rbsize_0, cbsize_0) { const size_t tid = (size_t)omp_get_thread_num(); const size_t rid = tid % pr; const size_t cid = tid / pr; const size_t rbstart = rid * rbsize_0; const size_t cbstart = cid * cbsize_0; const 
size_t rbsize = min2(m - rbstart, rbsize_0);
        const size_t cbsize = min2(n - cbstart, cbsize_0);
        mm_serial(rbsize, cbsize, k,
                  a + CIDX(rbstart, 0, lda), lda,
                  b + CIDX(0, cbstart, ldb), ldb,
                  c + CIDX(rbstart, cbstart, ldc), ldc);
    }
}
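/*
 * mm.h is not shown in this dump, so the exact typedefs are assumptions:
 * the CIDX(i, j, ld) arithmetic above only makes sense for column-major
 * storage (CIDX(i,j,ld) == (j)*(ld) + (i)), and data_t is assumed to be
 * double.  Under those assumptions, a minimal caller looks like this:
 */
#include <stdlib.h>
#include "mm.h"

int main(void)
{
    const size_t m = 1024, n = 768, k = 512;
    data_t *a = malloc(m * k * sizeof(data_t)); /* m x k, lda = m */
    data_t *b = malloc(k * n * sizeof(data_t)); /* k x n, ldb = k */
    data_t *c = malloc(m * n * sizeof(data_t)); /* m x n, ldc = m */
    if (!a || !b || !c)
        return 1;

    for (size_t p = 0; p < m * k; p++) a[p] = 1.0;
    for (size_t p = 0; p < k * n; p++) b[p] = 1.0;

    mm_set_partition(4, 6); /* 4 x 6 blocks -> 24 OpenMP threads */
    matrix_multiply(m, n, k, a, m, b, k, c, m);

    /* with all-ones inputs, every entry of c should now equal k */
    free(a);
    free(b);
    free(c);
    return 0;
}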
colormap.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR M M AAA PPPP % % C O O L O O R R MM MM A A P P % % C O O L O O RRRR M M M AAAAA PPPP % % C O O L O O R R M M A A P % % CCCC OOO LLLLL OOO R R M M A A P % % % % % % MagickCore Colormap Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % We use linked-lists because splay-trees do not currently support duplicate % key / value pairs (.e.g X11 green compliance and SVG green compliance). % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/client.h" #include "magick/configure.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/semaphore.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageColormap() allocates an image colormap and initializes % it to a linear gray colorspace. If the image already has a colormap, % it is replaced. AcquireImageColormap() returns MagickTrue if successful, % otherwise MagickFalse if there is not enough memory. % % The format of the AcquireImageColormap method is: % % MagickBooleanType AcquireImageColormap(Image *image,const size_t colors) % % A description of each parameter follows: % % o image: the image. % % o colors: the number of colors in the image colormap. % */ MagickExport MagickBooleanType AcquireImageColormap(Image *image, const size_t colors) { register ssize_t i; /* Allocate image colormap. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->colors=MagickMax(colors,1); if (image->colormap == (PixelPacket *) NULL) image->colormap=(PixelPacket *) AcquireQuantumMemory(image->colors+1, sizeof(*image->colormap)); else image->colormap=(PixelPacket *) ResizeQuantumMemory(image->colormap, image->colors+1,sizeof(*image->colormap)); if (image->colormap == (PixelPacket *) NULL) { image->colors=0; image->storage_class=DirectClass; ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } for (i=0; i < (ssize_t) image->colors; i++) { size_t pixel; pixel=(size_t) (i*(QuantumRange/MagickMax(colors-1,1))); image->colormap[i].red=(Quantum) pixel; image->colormap[i].green=(Quantum) pixel; image->colormap[i].blue=(Quantum) pixel; image->colormap[i].opacity=OpaqueOpacity; } return(SetImageStorageClass(image,PseudoClass)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C y c l e C o l o r m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CycleColormap() displaces an image's colormap by a given number of % positions. If you cycle the colormap a number of times you can produce % a psychodelic effect. % % The format of the CycleColormapImage method is: % % MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace) % % A description of each parameter follows: % % o image: the image. % % o displace: displace the colormap this amount. % */ MagickExport MagickBooleanType CycleColormapImage(Image *image, const ssize_t displace) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == DirectClass) (void) SetImageType(image,PaletteType); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; ssize_t index; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { index=(ssize_t) (GetPixelIndex(indexes+x)+displace) % image->colors; if (index < 0) index+=(ssize_t) image->colors; SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S o r t C o l o r m a p B y I n t e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SortColormapByIntensity() sorts the colormap of a PseudoClass image by % decreasing color intensity. 
% % The format of the SortColormapByIntensity method is: % % MagickBooleanType SortColormapByIntensity(Image *image) % % A description of each parameter follows: % % o image: A pointer to an Image structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const PixelPacket *color_1, *color_2; int intensity; color_1=(const PixelPacket *) x; color_2=(const PixelPacket *) y; intensity=PixelPacketIntensity(color_2)-(int) PixelPacketIntensity(color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport MagickBooleanType SortColormapByIntensity(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; register ssize_t i; ssize_t y; unsigned short *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->storage_class != PseudoClass) return(MagickTrue); exception=(&image->exception); /* Allocate memory for pixel indexes. */ pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors, sizeof(*pixels)); if (pixels == (unsigned short *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Assign index values to colormap entries. */ for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(IndexPacket) i; /* Sort image colormap by decreasing color popularity. */ qsort((void *) image->colormap,(size_t) image->colors, sizeof(*image->colormap),IntensityCompare); /* Update image colormap indexes to sorted colormap order. */ for (i=0; i < (ssize_t) image->colors; i++) pixels[(ssize_t) image->colormap[i].opacity]=(unsigned short) i; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket index; register ssize_t x; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { index=(IndexPacket) pixels[(ssize_t) GetPixelIndex(indexes+x)]; SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (status == MagickFalse) break; } image_view=DestroyCacheView(image_view); pixels=(unsigned short *) RelinquishMagickMemory(pixels); return(status); }
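/*
 * Stripped of the pixel-cache machinery, the two exported operations above
 * reduce to simple index arithmetic.  A standalone sketch (plain C with
 * stand-in constants, deliberately not the MagickCore API) of the linear
 * gray ramp built by AcquireImageColormap and the wrap-around displacement
 * applied by CycleColormapImage:
 */
#include <stdio.h>

#define QUANTUM_RANGE 65535UL /* stand-in for QuantumRange at Q16 depth */

int main(void)
{
    enum { COLORS = 8 };
    unsigned long map[COLORS];
    long displace = -3;

    /* AcquireImageColormap: colormap entry i is a linear gray level */
    for (int i = 0; i < COLORS; i++)
        map[i] = (unsigned long) i * (QUANTUM_RANGE / (COLORS - 1));

    /* CycleColormapImage: indexes shift modulo the palette size, with
       negative results wrapped back into range */
    for (long index = 0; index < COLORS; index++)
    {
        long cycled = (index + displace) % COLORS;
        if (cycled < 0)
            cycled += COLORS;
        printf("%ld -> %ld (gray %lu)\n", index, cycled, map[cycled]);
    }
    return 0;
}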
GB_binop__band_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__band_uint8 // A.*B function (eWiseMult): GB_AemultB__band_uint8 // A*D function (colscale): GB_AxD__band_uint8 // D*A function (rowscale): GB_DxB__band_uint8 // C+=B function (dense accum): GB_Cdense_accumB__band_uint8 // C+=b function (dense accum): GB_Cdense_accumb__band_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_uint8 // C=scalar+B GB_bind1st__band_uint8 // C=scalar+B' GB_bind1st_tran__band_uint8 // C=A+scalar GB_bind2nd__band_uint8 // C=A'+scalar GB_bind2nd_tran__band_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij) & (bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) & (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BAND || GxB_NO_UINT8 || GxB_NO_BAND_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__band_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__band_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__band_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__band_uint8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__band_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__band_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__band_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__band_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x) & (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__band_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij) & (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x) & (aij) ; \ } GrB_Info GB_bind1st_tran__band_uint8 ( GrB_Matrix C, const GB_void *x_input, 
const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij) & (y) ; \ } GrB_Info GB_bind2nd_tran__band_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
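/*
 * A hedged user-level driver: an eWiseMult with the BAND operator on UINT8
 * matrices is what reaches the generated GB_AemultB__band_uint8 kernel above
 * (when the library is built without GBCOMPACT).  Assumes a GraphBLAS that
 * ships the v1.3 GrB_BAND_UINT8 operator; older SuiteSparse releases spell
 * it GxB_BAND_UINT8.
 */
#include <stdio.h>
#include <stdint.h>
#include "GraphBLAS.h"

int main(void)
{
    GrB_init(GrB_NONBLOCKING);

    GrB_Matrix A, B, C;
    GrB_Matrix_new(&A, GrB_UINT8, 4, 4);
    GrB_Matrix_new(&B, GrB_UINT8, 4, 4);
    GrB_Matrix_new(&C, GrB_UINT8, 4, 4);

    GrB_Matrix_setElement_UINT8(A, 0x0F, 1, 1);
    GrB_Matrix_setElement_UINT8(B, 0x3C, 1, 1);

    /* C = A .& B on the intersection of the two patterns */
    GrB_eWiseMult(C, NULL, NULL, GrB_BAND_UINT8, A, B, NULL);

    uint8_t x = 0;
    GrB_Matrix_extractElement_UINT8(&x, C, 1, 1);
    printf("0x%02x\n", x); /* prints 0x0c */

    GrB_Matrix_free(&A);
    GrB_Matrix_free(&B);
    GrB_Matrix_free(&C);
    GrB_finalize();
    return 0;
}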
ten_tusscher_2004_epi_S1_13.c
//Original Ten Tusscher #include <assert.h> #include <stdlib.h> #include "ten_tusscher_2004_epi_S1_13.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } //TODO: this should be called only once for the whole mesh, like in the GPU code SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6124649455479,0.00127794843601506,0.780792340373146,0.780621416430051,0.000173708999922449,0.485550218750528,0.00293105929439211,0.999998362618276,1.91930561072561e-08,1.87995323300123e-05,0.999771109571080,1.00717919024407,0.999996460509174,4.32012539733253e-05,0.681225232256513,9.64639490171753,139.835052468258}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL /// real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito //#ifdef EPI real Gto=0.294; //#endif // #ifdef ENDO // real Gto=0.073; //#endif //#ifdef MCELL // real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters 
[]={13.9878532723791,0.000381859667002297,0.000164377584172001,0.000429492452900182,0.281717671480526,0.172178664836313,0.158009524014960,3.54321400489854,0.0185670643252902,2.13545487708985,1099.99990980037,0.000491721845343899,0.419354711210666,0.0199628106883488,0.00141401145930471,3.06197556760024e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; 
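/* Added commentary (not in the original model source): the update below uses
   the rapid-buffering approximation.  Total SR calcium (free CaSR plus
   calsequestrin-bound CaCSQN) changes by dCaSR over the step, and the new
   free concentration is recovered analytically as the positive root of the
   buffering-equilibrium quadratic x*x + bjsr*x - cjsr = 0, i.e.
   CaSR = (sqrt(bjsr*bjsr + 4*cjsr) - bjsr)/2.  The cytosolic update that
   follows (CaBuf, bc, cc) applies the same scheme with Bufc and Kbufc. */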
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; 
rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
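/* Added illustration (not part of the model source): every gate update in
   RHS_cpu above follows the Rush-Larsen scheme, which integrates the linear
   gate ODE dg/dt = (g_inf - g)/tau exactly over one step, under the
   assumption that g_inf and tau are frozen at their current-voltage values.
   A generic helper (hypothetical name) would be: */

#include <math.h>

static double rush_larsen_step(double g, double g_inf, double tau, double dt)
{
    /* exact solution of dg/dt = (g_inf - g)/tau after time dt */
    return g_inf - (g_inf - g)*exp(-dt/tau);
}

/* e.g. rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M) above is precisely
   rush_larsen_step(sm, M_INF, TAU_M, dt). */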
mangrove.c
/*********************************************************************************/ /* */ /* Animation of wave equation in a planar domain */ /* */ /* N. Berglund, december 2012, may 2021 */ /* */ /* UPDATE 24/04: distinction between damping and "elasticity" parameters */ /* UPDATE 27/04: new billiard shapes, bug in color scheme fixed */ /* UPDATE 28/04: code made more efficient, with help of Marco Mancini */ /* */ /* Feel free to reuse, but if doing so it would be nice to drop a */ /* line to nils.berglund@univ-orleans.fr - Thanks! */ /* */ /* compile with */ /* gcc -o wave_billiard wave_billiard.c */ /* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */ /* */ /* OMP acceleration may be more effective after executing */ /* export OMP_NUM_THREADS=2 in the shell before running the program */ /* */ /* To make a video, set MOVIE to 1 and create subfolder tif_wave */ /* It may be possible to increase parameter PAUSE */ /* */ /* create movie using */ /* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */ /* */ /*********************************************************************************/ /*********************************************************************************/ /* */ /* NB: The algorithm used to simulate the wave equation is highly paralellizable */ /* One could make it much faster by using a GPU */ /* */ /*********************************************************************************/ #include <math.h> #include <string.h> #include <GL/glut.h> #include <GL/glu.h> #include <unistd.h> #include <sys/types.h> #include <tiffio.h> /* Sam Leffler's libtiff library. */ #include <omp.h> #define MOVIE 0 /* set to 1 to generate movie */ /* General geometrical parameters */ #define WINWIDTH 1280 /* window width */ #define WINHEIGHT 720 /* window height */ #define NX 640 /* number of grid points on x axis */ #define NY 360 /* number of grid points on y axis */ // #define NX 1280 /* number of grid points on x axis */ // #define NY 720 /* number of grid points on y axis */ #define XMIN -2.0 #define XMAX 2.0 /* x interval */ #define YMIN -1.125 #define YMAX 1.125 /* y interval for 9/16 aspect ratio */ #define JULIA_SCALE 1.0 /* scaling for Julia sets */ /* Choice of the billiard table */ #define B_DOMAIN 20 /* choice of domain shape, see list in global_pdes.c */ // #define CIRCLE_PATTERN 1 /* pattern of circles, see list in global_pdes.c */ #define CIRCLE_PATTERN 8 /* pattern of circles, see list in global_pdes.c */ #define P_PERCOL 0.25 /* probability of having a circle in C_RAND_PERCOL arrangement */ #define NPOISSON 340 /* number of points for Poisson C_RAND_POISSON arrangement */ #define RANDOM_POLY_ANGLE 0 /* set to 1 to randomize angle of polygons */ #define LAMBDA 0.85 /* parameter controlling the dimensions of domain */ #define MU 0.03 /* parameter controlling the dimensions of domain */ #define NPOLY 3 /* number of sides of polygon */ #define APOLY 1.0 /* angle by which to turn polygon, in units of Pi/2 */ #define MDEPTH 4 /* depth of computation of Menger gasket */ #define MRATIO 3 /* ratio defining Menger gasket */ #define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */ #define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */ #define FOCI 1 /* set to 1 to draw focal points of ellipse */ #define NGRIDX 15 /* number of grid point for grid of disks */ #define NGRIDY 20 /* number of grid point for grid of disks */ #define X_SHOOTER -0.2 #define Y_SHOOTER -0.6 #define X_TARGET 0.4 #define Y_TARGET 0.7 /* shooter and target positions 
in laser fight */ #define ISO_XSHIFT_LEFT -1.65 #define ISO_XSHIFT_RIGHT 0.4 #define ISO_YSHIFT_LEFT -0.05 #define ISO_YSHIFT_RIGHT -0.05 #define ISO_SCALE 0.85 /* coordinates for isospectral billiards */ /* You can add more billiard tables by adapting the functions */ /* xy_in_billiard and draw_billiard below */ /* Physical parameters of wave equation */ #define TWOSPEEDS 1 /* set to 1 to replace hardcore boundary by medium with different speed */ #define OSCILLATE_LEFT 1 /* set to 1 to add oscilating boundary condition on the left */ #define OSCILLATE_TOPBOT 0 /* set to 1 to enforce a planar wave on top and bottom boundary */ #define X_SHIFT -0.9 /* x range on which to apply OSCILLATE_TOPBOT */ #define OMEGA 0.00133333333 /* frequency of periodic excitation */ #define K_BC 3.0 /* spatial period of periodic excitation in y direction */ #define KX_BC 10.0 /* spatial period of periodic excitation in x direction */ #define KY_BC 3.3333 /* spatial period of periodic excitation in y direction */ // #define KX_BC 20.0 /* spatial period of periodic excitation in x direction */ // #define KY_BC 6.66666 /* spatial period of periodic excitation in y direction */ // #define OMEGA 0.002 /* frequency of periodic excitation */ // #define K_BC 3.0 /* spatial period of periodic excitation in y direction */ // #define KX_BC 30.0 /* spatial period of periodic excitation in x direction */ // #define KY_BC 10.0 /* spatial period of periodic excitation in y direction */ #define AMPLITUDE 1.0 /* amplitude of periodic excitation */ #define COURANT 0.02 /* Courant number */ #define COURANTB 0.015 /* Courant number in medium B */ // #define COURANTB 0.00666 /* Courant number in medium B */ #define GAMMA 3.0e-6 /* damping factor in wave equation */ #define GAMMAB 5.0e-4 /* damping factor in wave equation */ // #define GAMMA 2.0e-6 /* damping factor in wave equation */ // #define GAMMAB 2.5e-4 /* damping factor in wave equation */ #define GAMMA_SIDES 1.0e-4 /* damping factor on boundary */ #define GAMMA_TOPBOT 1.0e-6 /* damping factor on boundary */ #define KAPPA 0.0 /* "elasticity" term enforcing oscillations */ #define KAPPAB 1.0e-6 /* "elasticity" term enforcing oscillations */ #define KAPPA_SIDES 5.0e-4 /* "elasticity" term on absorbing boundary */ #define KAPPA_TOPBOT 0.0 /* "elasticity" term on absorbing boundary */ /* The Courant number is given by c*DT/DX, where DT is the time step and DX the lattice spacing */ /* The physical damping coefficient is given by GAMMA/(DT)^2 */ /* Increasing COURANT speeds up the simulation, but decreases accuracy */ /* For similar wave forms, COURANT^2*GAMMA should be kept constant */ /* Boundary conditions, see list in global_pdes.c */ #define B_COND 3 /* Parameters for length and speed of simulation */ #define NSTEPS 2000 /* number of frames of movie */ // #define NSTEPS 5500 /* number of frames of movie */ #define NVID 60 /* number of iterations between images displayed on screen */ #define NSEG 100 /* number of segments of boundary */ #define INITIAL_TIME 100 /* time after which to start saving frames */ #define BOUNDARY_WIDTH 2 /* width of billiard boundary */ #define PAUSE 1000 /* number of frames after which to pause */ #define PSLEEP 1 /* sleep time during pause */ #define SLEEP1 1 /* initial sleeping time */ #define SLEEP2 1 /* final sleeping time */ #define END_FRAMES 100 /* number of still frames at end of movie */ /* Parameters of initial condition */ #define INITIAL_AMP 0.2 /* amplitude of initial condition */ #define INITIAL_VARIANCE 0.002 /* variance of initial 
condition */ #define INITIAL_WAVELENGTH 0.1 /* wavelength of initial condition */ /* Plot type, see list in global_pdes.c */ #define PLOT 0 /* Color schemes */ #define COLOR_PALETTE 0 /* Color palette, see list in global_pdes.c */ #define BLACK 1 /* background */ #define COLOR_SCHEME 1 /* choice of color scheme, see list in global_pdes.c */ #define SCALE 0 /* set to 1 to adjust color scheme to variance of field */ #define SLOPE 1.0 /* sensitivity of color on wave amplitude */ #define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */ #define E_SCALE 2500.0 /* scaling factor for energy representation */ #define LOG_SCALE 1.0 /* scaling factor for energy log representation */ #define COLORHUE 260 /* initial hue of water color for scheme C_LUM */ #define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */ #define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */ #define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */ #define HUEMEAN 220.0 /* mean value of hue for color scheme C_HUE */ #define HUEAMP -50.0 /* amplitude of variation of hue for color scheme C_HUE */ /* mangrove properties */ #define MANGROVE_HUE_MIN 180.0 /* color of original mangrove */ #define MANGROVE_HUE_MAX -50.0 /* color of saturated mangrove */ // #define MANGROVE_EMAX 5.0e-3 /* max energy for mangrove to survive */ #define MANGROVE_EMAX 1.5e-3 /* max energy for mangrove to survive */ #define RANDOM_RADIUS 1 /* set to 1 for random circle radius */ #define ERODE_MANGROVES 1 /* set to 1 for mangroves to be eroded */ #define RECOVER_MANGROVES 1 /* set to 1 to allow mangroves to recover */ #define MOVE_MANGROVES 1 /* set to 1 for mobile mangroves */ #define DETACH_MANGROVES 1 /* set to 1 for mangroves to be able to detach */ #define INERTIA 1 /* set to 1 for taking inertia into account */ #define REPELL_MANGROVES 1 /* set to 1 for mangroves to repell each other */ #define DT_MANGROVE 0.1 /* time step for mangrove displacement */ #define KSPRING 0.05 /* spring constant of mangroves */ #define KWAVE 4.0 /* constant in force due to wave gradient */ #define KREPEL 5.0 /* constant in repelling force between mangroves */ #define REPEL_RADIUS 1.1 /* radius in which repelling force acts (in units of mangrove radius) */ #define DXMAX 0.02 /* max displacement of mangrove in one time step */ #define L_DETACH 0.25 /* spring length beyond which mangroves detach */ #define DAMP_MANGROVE 0.1 /* damping coefficient of mangroves */ #define MANGROVE_MASS 1.5 /* mass of mangrove of radius MU */ #define HASHX 25 /* size of hashgrid in x direction */ #define HASHY 15 /* size of hashgrid in y direction */ #define HASHMAX 10 /* maximal number of mangroves per hashgrid cell */ #define HASHGRID_PADDING 0.1 /* padding of hashgrid outside simulation window */ #define DRAW_COLOR_SCHEME 0 /* set to 1 to plot the color scheme */ #define COLORBAR_RANGE 8.0 /* scale of color scheme bar */ #define COLORBAR_RANGE_B 12.0 /* scale of color scheme bar for 2nd part */ #define ROTATE_COLOR_SCHEME 0 /* set to 1 to draw color scheme horizontally */ /* For debugging purposes only */ #define FLOOR 1 /* set to 1 to limit wave amplitude to VMAX */ #define VMAX 10.0 /* max value of wave amplitude */ #include "global_pdes.c" #include "sub_wave.c" #include "wave_common.c" double courant2, courantb2; /* Courant parameters squared */ typedef struct { double xc, yc, radius; /* center and radius of circle */ short int active; /* circle is active */ double energy; /* dissipated energy 
*/ double yc_wrapped; /* position of circle centers wrapped vertically */ double anchorx; /* points moving circles are attached to */ double anchory; /* points moving circles are attached to */ double vx; /* x velocity of circles */ double vy; /* y velocity of circles */ double radius_initial; /* initial circle radii */ double mass_inv; /* inverse of mangrove mass */ short int attached; /* has value 1 if the circle is attached to its anchor */ int hashx; /* hash grid positions of mangroves */ int hashy; /* hash grid positions of mangroves */ } t_mangrove; typedef struct { int number; /* total number of mangroves in cell */ int mangroves[HASHMAX]; /* numbers of mangroves in cell */ } t_hashgrid; /*********************/ /* animation part */ /*********************/ void init_bc_phase(double left_bc[NY], double top_bc[NX], double bot_bc[NX]) /* initialize boundary condition phase KX*x + KY*y */ { int i, j; double xy[2]; for (j=0; j<NY; j++) { ij_to_xy(0, j, xy); left_bc[j] = KX_BC*XMIN + KY_BC*xy[1]; } for (i=0; i<NX; i++) { ij_to_xy(i, 0, xy); bot_bc[i] = KX_BC*xy[0] + KY_BC*YMIN; top_bc[i] = KX_BC*xy[0] + KY_BC*YMAX; } } // void evolve_wave_half_old(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX], // short int *xy_in[NX]) // // void evolve_wave_half(phi_in, psi_in, phi_out, psi_out, xy_in) // /* time step of field evolution */ // /* phi is value of field at time t, psi at time t-1 */ // { // int i, j, iplus, iminus, jplus, jminus, tb_shift; // double delta, x, y, c, cc, gamma, kappa, phase, phasemin; // static long time = 0; // static int init_bc = 1; // static double left_bc[NY], top_bc[NX], bot_bc[NX]; // // time++; // // /* initialize boundary condition phase KX*x + KY*y */ // if ((OSCILLATE_LEFT)&&(init_bc)) // { // init_bc_phase(left_bc, top_bc, bot_bc); // tb_shift = (int)((X_SHIFT - XMIN)*(double)NX/(XMAX - XMIN)); // printf("tb_shift %i\n", tb_shift); // init_bc = 0; // } // // #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y,c,cc,gamma,kappa) // for (i=0; i<NX; i++){ // for (j=0; j<NY; j++){ // if (xy_in[i][j]) // { // c = COURANT; // cc = courant2; // gamma = GAMMA; // kappa = KAPPA; // } // else if (TWOSPEEDS) // { // c = COURANTB; // cc = courantb2; // gamma = GAMMAB; // kappa = KAPPAB; // } // // if ((TWOSPEEDS)||(xy_in[i][j])){ // /* discretized Laplacian for various boundary conditions */ // if ((B_COND == BC_DIRICHLET)||(B_COND == BC_ABSORBING)) // { // iplus = (i+1); if (iplus == NX) iplus = NX-1; // iminus = (i-1); if (iminus == -1) iminus = 0; // jplus = (j+1); if (jplus == NY) jplus = NY-1; // jminus = (j-1); if (jminus == -1) jminus = 0; // } // else if (B_COND == BC_PERIODIC) // { // iplus = (i+1) % NX; // iminus = (i-1) % NX; // if (iminus < 0) iminus += NX; // jplus = (j+1) % NY; // jminus = (j-1) % NY; // if (jminus < 0) jminus += NY; // } // else if (B_COND == BC_VPER_HABS) // { // iplus = (i+1); if (iplus == NX) iplus = NX-1; // iminus = (i-1); if (iminus == -1) iminus = 0; // jplus = (j+1) % NY; // jminus = (j-1) % NY; // if (jminus < 0) jminus += NY; // } // // /* imposing linear wave on top and bottom by making Laplacian 1d */ // if ((OSCILLATE_TOPBOT)&&(i < tb_shift)) // { // if (j == NY-1) // { // jminus = NY-1; // jplus = NY-1; // } // else if (j == 0) // { // jminus = 0; // jplus = 0; // } // } // // delta = phi_in[iplus][j] + phi_in[iminus][j] + phi_in[i][jplus] + phi_in[i][jminus] - 4.0*phi_in[i][j]; // // x = phi_in[i][j]; // y = psi_in[i][j]; // // /* evolve phi */ // if ((B_COND == 
BC_PERIODIC)||(B_COND == BC_DIRICHLET)) // phi_out[i][j] = -y + 2*x + cc*delta - kappa*x - gamma*(x-y); // else if (B_COND == BC_ABSORBING) // { // if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1)) // phi_out[i][j] = -y + 2*x + cc*delta - kappa*x - gamma*(x-y); // // /* upper border */ // else if (j==NY-1) // phi_out[i][j] = x - c*(x - phi_in[i][NY-2]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y); // // /* lower border */ // else if (j==0) // phi_out[i][j] = x - c*(x - phi_in[i][1]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y); // // /* right border */ // if (i==NX-1) // phi_out[i][j] = x - c*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); // // /* left border */ // else if (i==0) // phi_out[i][j] = x - c*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); // } // else if (B_COND == BC_VPER_HABS) // { // if ((i>0)&&(i<NX-1)) // phi_out[i][j] = -y + 2*x + cc*delta - kappa*x - gamma*(x-y); // // /* right border */ // else if (i==NX-1) // phi_out[i][j] = x - c*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); // // /* left border */ // else if (i==0) // phi_out[i][j] = x - c*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); // } // psi_out[i][j] = x; // // /* add oscillating boundary condition on the left */ // // if ((i == 0)&&(OSCILLATE_LEFT)) // // { // // phase = (double)time*OMEGA - DPI*K_BC*(double)j/(double)NY; // // if (phase < 0.0) phase = 0.0; // // phi_out[i][j] = AMPLITUDE*sin(phase); // // } // // /* add oscillating boundary condition on the left */ // if (OSCILLATE_LEFT) // { // phasemin = left_bc[0]; // if (i == 0) // { // phase = (double)time*OMEGA - left_bc[j] + phasemin; // if (phase < 0.0) phase = 0.0; // phi_out[i][j] = AMPLITUDE*sin(phase); // } // if ((j == 0)&&(i < tb_shift)) // { // phase = (double)time*OMEGA - bot_bc[i] + phasemin; // if (phase < 0.0) phase = 0.0; // phi_out[i][j] = AMPLITUDE*sin(phase); // } // else if ((j == NY-1)&&(i < tb_shift)) // { // phase = (double)time*OMEGA - top_bc[i] + phasemin; // if (phase < 0.0) phase = 0.0; // phi_out[i][j] = AMPLITUDE*sin(phase); // } // } // // if (FLOOR) // { // if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX; // if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX; // if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX; // if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX; // } // } // } // } // // printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]); // } void evolve_wave_half(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX], short int *xy_in[NX]) /* time step of field evolution */ /* phi is value of field at time t, psi at time t-1 */ /* this version of the function has been rewritten in order to minimize the number of if-branches */ { int i, j, iplus, iminus, jplus, jminus, tb_shift; double delta, x, y, c, cc, gamma, kappa, phase, phasemin; static long time = 0; static double tc[NX][NY], tcc[NX][NY], tgamma[NX][NY], left_bc[NY], top_bc[NX], bot_bc[NX]; static short int first = 1, init_bc = 1; time++; /* initialize boundary condition phase KX*x + KY*y */ if ((OSCILLATE_LEFT)&&(init_bc)) { init_bc_phase(left_bc, top_bc, bot_bc); tb_shift = (int)((X_SHIFT - XMIN)*(double)NX/(XMAX - XMIN)); printf("tb_shift %i\n", tb_shift); init_bc = 0; } /* initialize tables with wave speeds and dissipation */ // if (first) { for (i=0; i<NX; i++){ for (j=0; j<NY; j++){ if (xy_in[i][j] != 0) { tc[i][j] = COURANT; tcc[i][j] = courant2; if (xy_in[i][j] == 1) tgamma[i][j] = GAMMA; else tgamma[i][j] = GAMMAB; } else if (TWOSPEEDS) { tc[i][j] = COURANTB; tcc[i][j] = courantb2; 
tgamma[i][j] = GAMMAB; } } } // first = 0; } #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y) /* evolution in the bulk */ for (i=1; i<NX-1; i++){ for (j=1; j<NY-1; j++){ if ((TWOSPEEDS)||(xy_in[i][j] != 0)){ x = phi_in[i][j]; y = psi_in[i][j]; /* discretized Laplacian */ delta = phi_in[i+1][j] + phi_in[i-1][j] + phi_in[i][j+1] + phi_in[i][j-1] - 4.0*x; /* evolve phi */ phi_out[i][j] = -y + 2*x + tcc[i][j]*delta - KAPPA*x - tgamma[i][j]*(x-y); psi_out[i][j] = x; } } } /* left boundary */ // if (OSCILLATE_LEFT) for (j=1; j<NY-1; j++) phi_out[0][j] = AMPLITUDE*cos((double)time*OMEGA); if (OSCILLATE_LEFT) for (j=1; j<NY-1; j++) { phasemin = left_bc[0]; phase = (double)time*OMEGA - left_bc[j] + phasemin; if (phase < 0.0) phase = 0.0; phi_out[0][j] = AMPLITUDE*sin(phase); } else for (j=1; j<NY-1; j++){ if ((TWOSPEEDS)||(xy_in[0][j] != 0)){ x = phi_in[0][j]; y = psi_in[0][j]; switch (B_COND) { case (BC_DIRICHLET): { delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x; phi_out[0][j] = -y + 2*x + tcc[0][j]*delta - KAPPA*x - tgamma[0][j]*(x-y); break; } case (BC_PERIODIC): { delta = phi_in[1][j] + phi_in[NX-1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 4.0*x; phi_out[0][j] = -y + 2*x + tcc[0][j]*delta - KAPPA*x - tgamma[0][j]*(x-y); break; } case (BC_ABSORBING): { delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x; phi_out[0][j] = x - tc[0][j]*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } case (BC_VPER_HABS): { delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x; phi_out[0][j] = x - tc[0][j]*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } } psi_out[0][j] = x; } } /* right boundary */ for (j=1; j<NY-1; j++){ if ((TWOSPEEDS)||(xy_in[NX-1][j] != 0)){ x = phi_in[NX-1][j]; y = psi_in[NX-1][j]; switch (B_COND) { case (BC_DIRICHLET): { delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x; phi_out[NX-1][j] = -y + 2*x + tcc[NX-1][j]*delta - KAPPA*x - tgamma[NX-1][j]*(x-y); break; } case (BC_PERIODIC): { delta = phi_in[NX-2][j] + phi_in[0][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 4.0*x; phi_out[NX-1][j] = -y + 2*x + tcc[NX-1][j]*delta - KAPPA*x - tgamma[NX-1][j]*(x-y); break; } case (BC_ABSORBING): { delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x; phi_out[NX-1][j] = x - tc[NX-1][j]*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } case (BC_VPER_HABS): { delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x; phi_out[NX-1][j] = x - tc[NX-1][j]*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } } psi_out[NX-1][j] = x; } } /* top boundary */ for (i=0; i<NX; i++){ if ((TWOSPEEDS)||(xy_in[i][NY-1] != 0)){ x = phi_in[i][NY-1]; y = psi_in[i][NY-1]; if ((OSCILLATE_TOPBOT)&&(i < tb_shift)) { iplus = i+1; iminus = i-1; if (iminus < 0) iminus = 0; delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + - 2.0*x; phi_out[i][NY-1] = -y + 2*x + tcc[i][NY-1]*delta - KAPPA*x - tgamma[i][NY-1]*(x-y); } else if ((OSCILLATE_LEFT)&&(i < tb_shift)) { phasemin = left_bc[0]; phase = (double)time*OMEGA - top_bc[i] + phasemin; if (phase < 0.0) phase = 0.0; phi_out[i][NY-1] = AMPLITUDE*sin(phase); } else switch (B_COND) { case (BC_DIRICHLET): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] - 3.0*x; phi_out[i][NY-1] = -y + 2*x + tcc[i][NY-1]*delta - KAPPA*x - tgamma[i][NY-1]*(x-y); break; } case (BC_PERIODIC): { iplus = (i+1) % NX; iminus 
= (i-1) % NX; if (iminus < 0) iminus += NX; delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] + phi_in[i][0] - 4.0*x; phi_out[i][NY-1] = -y + 2*x + tcc[i][NY-1]*delta - KAPPA*x - tgamma[i][NY-1]*(x-y); break; } case (BC_ABSORBING): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] - 3.0*x; phi_out[i][NY-1] = x - tc[i][NY-1]*(x - phi_in[i][NY-2]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y); break; } case (BC_VPER_HABS): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][NY-1] + phi_in[iminus][NY-1] + phi_in[i][NY-2] + phi_in[i][0] - 4.0*x; phi_out[i][NY-1] = -y + 2*x + tcc[i][NY-1]*delta - KAPPA*x - tgamma[i][NY-1]*(x-y); break; } } psi_out[i][NY-1] = x; } } /* bottom boundary */ for (i=0; i<NX; i++){ if ((TWOSPEEDS)||(xy_in[i][0] != 0)){ x = phi_in[i][0]; y = psi_in[i][0]; if ((OSCILLATE_TOPBOT)&&(i < tb_shift)) { iplus = i+1; iminus = i-1; if (iminus < 0) iminus = 0; delta = phi_in[iplus][0] + phi_in[iminus][0] + - 2.0*x; phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y); } else if ((OSCILLATE_LEFT)&&(i < tb_shift)) { phasemin = left_bc[0]; phase = (double)time*OMEGA - bot_bc[i] + phasemin; if (phase < 0.0) phase = 0.0; phi_out[i][0] = AMPLITUDE*sin(phase); } else switch (B_COND) { case (BC_DIRICHLET): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] - 3.0*x; phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y); break; } case (BC_PERIODIC): { iplus = (i+1) % NX; iminus = (i-1) % NX; if (iminus < 0) iminus += NX; delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] + phi_in[i][NY-1] - 4.0*x; phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y); break; } case (BC_ABSORBING): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] - 3.0*x; phi_out[i][0] = x - tc[i][0]*(x - phi_in[i][1]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y); break; } case (BC_VPER_HABS): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] + phi_in[i][NY-1] - 4.0*x; phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y); break; } } psi_out[i][0] = x; } } /* add oscillating boundary condition on the left corners - NEEDED ? 
*/ if ((i == 0)&&(OSCILLATE_LEFT)) { phi_out[i][0] = AMPLITUDE*cos((double)time*OMEGA); phi_out[i][NY-1] = AMPLITUDE*cos((double)time*OMEGA); } /* for debugging purposes/if there is a risk of blow-up */ if (FLOOR) for (i=0; i<NX; i++){ for (j=0; j<NY; j++){ if (xy_in[i][j] != 0) { if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX; if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX; if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX; if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX; } } } } void evolve_wave(double *phi[NX], double *psi[NX], double *phi_tmp[NX], double *psi_tmp[NX], short int *xy_in[NX]) /* time step of field evolution */ /* phi is value of field at time t, psi at time t-1 */ { evolve_wave_half(phi, psi, phi_tmp, psi_tmp, xy_in); evolve_wave_half(phi_tmp, psi_tmp, phi, psi, xy_in); } void hash_xy_to_ij(double x, double y, int ij[2]) { static int first = 1; static double lx, ly; int i, j; if (first) { lx = XMAX - XMIN + 2.0*HASHGRID_PADDING; ly = YMAX - YMIN + 2.0*HASHGRID_PADDING; first = 0; } i = (int)((double)HASHX*(x - XMIN + HASHGRID_PADDING)/lx); j = (int)((double)HASHY*(y - YMIN + HASHGRID_PADDING)/ly); if (i<0) i = 0; if (i>=HASHX) i = HASHX-1; if (j<0) j = 0; if (j>=HASHY) j = HASHY-1; ij[0] = i; ij[1] = j; // printf("Mapped (%.3f,%.3f) to (%i, %i)\n", x, y, ij[0], ij[1]); } void compute_repelling_force(int i, int j, double force[2], t_mangrove* mangrove) /* compute repelling force of mangrove j on mangrove i */ { double x1, y1, x2, y2, distance, r, f; x1 = mangrove[i].xc; y1 = mangrove[i].yc; x2 = mangrove[j].xc; y2 = mangrove[j].yc; distance = module2(x2 - x1, y2 - y1); r = mangrove[i].radius + mangrove[j].radius; if (r <= 0.0) r = 0.001*MU; f = KREPEL/(0.001 + distance*distance); if ((distance > 0.0)&&(distance < REPEL_RADIUS*r)) { force[0] = f*(x1 - x2)/distance; force[1] = f*(y1 - y2)/distance; } else { force[0] = 0.0; force[1] = 0.0; } } void update_hashgrid(t_mangrove* mangrove, int* hashgrid_number, int* hashgrid_mangroves) { int i, j, k, n, m, ij[2], max = 0; printf("Updating hashgrid_number\n"); for (i=0; i<HASHX*HASHY; i++) hashgrid_number[i] = 0; printf("Updated hashgrid_number\n"); /* place each mangrove in hash grid */ for (k=1; k<ncircles; k++) // if (circleactive[k]) { // printf("placing circle %i\t", k); hash_xy_to_ij(mangrove[k].xc, mangrove[k].yc, ij); i = ij[0]; j = ij[1]; // printf("ij = (%i, %i)\t", i, j); n = hashgrid_number[i*HASHY + j]; m = i*HASHY*HASHMAX + j*HASHMAX + n; // printf("n = %i, m = %i\n", n, m); if (m < HASHX*HASHY*HASHMAX) hashgrid_mangroves[m] = k; else printf("Too many mangroves in hash cell, try increasing HASHMAX\n"); hashgrid_number[i*HASHY + j]++; mangrove[k].hashx = i; mangrove[k].hashy = j; if (n > max) max = n; // printf("Placed mangrove %i at (%i,%i) in hashgrid\n", k, ij[0], ij[1]); // printf("%i mangroves at (%i,%i)\n", hashgrid_number[ij[0]][ij[1]], ij[0], ij[1]); } printf("Maximal number of mangroves per hash cell: %i\n", max); } void animation() { double time, scale, diss, rgb[3], hue, y, dissip, ej, gradient[2], dx, dy, dt, xleft, xright, length, fx, fy, force[2]; double *phi[NX], *psi[NX], *phi_tmp[NX], *psi_tmp[NX]; short int *xy_in[NX], redraw = 0; int i, j, k, n, s, ij[2], i0, iplus, iminus, j0, jplus, jminus, p, q; static int imin, imax; static short int first = 1; t_mangrove *mangrove; int *hashgrid_number, *hashgrid_mangroves; t_hashgrid *hashgrid; /* Since NX and NY are big, it seemed wiser to use some memory allocation here */ for (i=0; i<NX; i++) { phi[i] = (double *)malloc(NY*sizeof(double)); psi[i] = (double 
*)malloc(NY*sizeof(double)); phi_tmp[i] = (double *)malloc(NY*sizeof(double)); psi_tmp[i] = (double *)malloc(NY*sizeof(double)); xy_in[i] = (short int *)malloc(NY*sizeof(short int)); } mangrove = (t_mangrove *)malloc(NMAXCIRCLES*sizeof(t_mangrove)); /* mangroves */ hashgrid = (t_hashgrid *)malloc(HASHX*HASHY*sizeof(t_hashgrid)); /* hashgrid */ hashgrid_number = (int *)malloc(HASHX*HASHY*sizeof(int)); /* total number of mangroves in each hash grid cell */ hashgrid_mangroves = (int *)malloc(HASHX*HASHY*HASHMAX*sizeof(int)); /* numbers of mangroves in each hash grid cell */ /* initialise positions and radii of circles */ if ((B_DOMAIN == D_CIRCLES)||(B_DOMAIN == D_CIRCLES_IN_RECT)) init_circle_config(circles); else if (B_DOMAIN == D_POLYGONS) init_polygon_config(polygons); courant2 = COURANT*COURANT; courantb2 = COURANTB*COURANTB; // dt = 0.01; /* initialize wave with a drop at one point, zero elsewhere */ init_wave_flat(phi, psi, xy_in); /* initialise mangroves */ for (i=0; i < ncircles; i++) { /* to avoid having to recode init_circle_config, would be more elegant in C++ */ mangrove[i].xc = circles[i].xc; mangrove[i].yc = circles[i].yc; mangrove[i].radius = circles[i].radius; mangrove[i].active = circles[i].active; mangrove[i].energy = 0.0; y = mangrove[i].yc; if (y >= YMAX) y -= mangrove[i].radius; if (y <= YMIN) y += mangrove[i].radius; // if (y >= YMAX) y -= (YMAX - YMIN); // if (y <= YMIN) y += (YMAX - YMIN); mangrove[i].yc_wrapped = y; // mangrove[i].active = 1; if (RANDOM_RADIUS) mangrove[i].radius = mangrove[i].radius*(0.75 + 0.5*((double)rand()/RAND_MAX)); mangrove[i].radius_initial = mangrove[i].radius; mangrove[i].attached = 1; mangrove[i].mass_inv = MU*MU/(MANGROVE_MASS*mangrove[i].radius*mangrove[i].radius); if (MOVE_MANGROVES) { mangrove[i].anchorx = mangrove[i].xc; mangrove[i].anchory = mangrove[i].yc_wrapped; // mangrove[i].anchory = mangrove[i].yc; } if (INERTIA) { mangrove[i].vx = 0.0; mangrove[i].vy = 0.0; } } /* initialise hash table for interacting mangroves */ if (REPELL_MANGROVES) update_hashgrid(mangrove, hashgrid_number, hashgrid_mangroves); if (first) /* compute box limits where circles are reset */ { /* find leftmost and rightmost circle */ for (i=0; i<ncircles; i++) if ((mangrove[i].active)&&(mangrove[i].xc - mangrove[i].radius < xleft)) xleft = mangrove[i].xc - mangrove[i].radius; for (i=0; i<ncircles; i++) if ((mangrove[i].active)&&(mangrove[i].xc + mangrove[i].radius > xright)) xright = mangrove[i].xc + mangrove[i].radius; xy_to_ij(xleft, 0.0, ij); imin = ij[0] - 10; if (imin < 0) imin = 0; xy_to_ij(xright, 0.0, ij); imax = ij[0]; if (imax >= NX) imax = NX-1; first = 0; printf("xleft = %.3lg, xright = %.3lg, imin = %i, imax = %i\n", xleft, xright, imin, imax); } blank(); glColor3f(0.0, 0.0, 0.0); draw_wave(phi, psi, xy_in, 1.0, 0, PLOT); draw_billiard(); glutSwapBuffers(); sleep(SLEEP1); for (i=0; i<=INITIAL_TIME + NSTEPS; i++) { printf("Computing frame %d\n",i); /* compute the variance of the field to adjust color scheme */ /* the color depends on the field divided by sqrt(1 + variance) */ if (SCALE) { scale = sqrt(1.0 + compute_variance(phi,psi, xy_in)); // printf("Scaling factor: %5lg\n", scale); } else scale = 1.0; printf("Drawing wave\n"); draw_wave(phi, psi, xy_in, scale, i, PLOT); printf("Evolving wave\n"); for (j=0; j<NVID; j++) { // printf("%i ", j); evolve_wave(phi, psi, phi_tmp, psi_tmp, xy_in); // if (i % 10 == 9) oscillate_linear_wave(0.2*scale, 0.15*(double)(i*NVID + j), -1.5, YMIN, -1.5, YMAX, phi, psi); } /* move mangroves */ if (MOVE_MANGROVES) 
for (j=0; j<ncircles; j++) if (mangrove[j].active) { compute_gradient(phi, psi, mangrove[j].xc, mangrove[j].yc_wrapped, gradient); // printf("gradient = (%.3lg, %.3lg)\t", gradient[0], gradient[1]); // if (j%NGRIDY == 0) printf("gradient (%.3lg, %.3lg)\n", gradient[0], gradient[1]); // if (j%NGRIDY == 0) printf("circle %i (%.3lg, %.3lg) -> ", j, mangrove[j].xc, mangrove[j].yc); /* compute force of wave */ dx = DT_MANGROVE*KWAVE*gradient[0]; dy = DT_MANGROVE*KWAVE*gradient[1]; /* compute force of spring */ if (mangrove[j].attached) { dx += DT_MANGROVE*(-KSPRING*(mangrove[j].xc - mangrove[j].anchorx)); dy += DT_MANGROVE*(-KSPRING*(mangrove[j].yc_wrapped - mangrove[j].anchory)); } /* compute repelling force from other mangroves */ if (REPELL_MANGROVES) { /* determine neighboring grid points */ i0 = mangrove[j].hashx; iminus = i0 - 1; if (iminus < 0) iminus = 0; iplus = i0 + 1; if (iplus >= HASHX) iplus = HASHX-1; j0 = mangrove[j].hashy; jminus = j0 - 1; if (jminus < 0) jminus = 0; jplus = j0 + 1; if (jplus >= HASHY) jplus = HASHY-1; fx = 0.0; fy = 0.0; for (p=iminus; p<= iplus; p++) for (q=jminus; q<= jplus; q++) for (k=0; k<hashgrid_number[p*HASHY+q]; k++) if (mangrove[hashgrid_mangroves[p*HASHY*HASHMAX + q*HASHMAX + k]].active) { compute_repelling_force(j, hashgrid_mangroves[p*HASHY*HASHMAX + q*HASHMAX + k], force, mangrove); fx += force[0]; fy += force[1]; } // if (fx*fx + fy*fy > 0.001) printf("Force on mangrove %i: (%.3f, %.3f)\n", j, fx, fy); dx += DT_MANGROVE*fx; dy += DT_MANGROVE*fy; } /* detach mangrove if spring is too long */ if (DETACH_MANGROVES) { length = module2(mangrove[j].xc - mangrove[j].anchorx, mangrove[j].yc_wrapped - mangrove[j].anchory); // if (j%NGRIDY == 0) printf("spring length %.i: %.3lg\n", j, length); // if (length > L_DETACH) mangrove[j].attached = 0; if (length*mangrove[j].mass_inv > L_DETACH) mangrove[j].attached = 0; } if (dx > DXMAX) dx = DXMAX; if (dx < -DXMAX) dx = -DXMAX; if (dy > DXMAX) dy = DXMAX; if (dy < -DXMAX) dy = -DXMAX; if (INERTIA) { mangrove[j].vx += (dx - DAMP_MANGROVE*mangrove[j].vx)*mangrove[j].mass_inv; mangrove[j].vy += (dy - DAMP_MANGROVE*mangrove[j].vy)*mangrove[j].mass_inv; mangrove[j].xc += mangrove[j].vx*DT_MANGROVE; mangrove[j].yc += mangrove[j].vy*DT_MANGROVE; mangrove[j].yc_wrapped += mangrove[j].vy*DT_MANGROVE; // if (j%NGRIDY == 0) // printf("circle %.i: (dx,dy) = (%.3lg,%.3lg), (vx,vy) = (%.3lg,%.3lg)\n", // j, mangrove[j].xc-mangrove[j].anchorx, mangrove[j].yc-mangrove[j].anchory, mangrove[j].vx, mangrove[j].vy); } else { mangrove[j].xc += dx*mangrove[j].mass_inv*DT_MANGROVE; mangrove[j].yc += dy*mangrove[j].mass_inv*DT_MANGROVE; mangrove[j].yc_wrapped += dy*mangrove[j].mass_inv*DT_MANGROVE; } if (mangrove[j].xc <= XMIN) mangrove[j].xc = XMIN; if (mangrove[j].xc >= XMAX) mangrove[j].xc = XMAX; if (mangrove[j].yc_wrapped <= YMIN) mangrove[j].yc_wrapped = YMIN; if (mangrove[j].yc_wrapped >= YMAX) mangrove[j].yc_wrapped = YMAX; // if (j%NGRIDY == 0) printf("(%.3lg, %.3lg)\n", mangrove[j].xc, mangrove[j].yc); redraw = 1; } /* test for debugging */ if (1) for (j=0; j<ncircles; j++) { dissip = compute_dissipation(phi, psi, xy_in, mangrove[j].xc, mangrove[j].yc_wrapped); /* make sure the dissipation does not grow too fast because of round-off/blow-up */ if (dissip > 0.1*MANGROVE_EMAX) { dissip = 0.1*MANGROVE_EMAX; printf("Flooring dissipation!\n"); } if (mangrove[j].active) { mangrove[j].energy += dissip; ej = mangrove[j].energy; // printf("ej = %.3f\n", ej); if (ej <= MANGROVE_EMAX) { if (ej > 0.0) { hue = MANGROVE_HUE_MIN + 
(MANGROVE_HUE_MAX - MANGROVE_HUE_MIN)*ej/MANGROVE_EMAX; if (hue < 0.0) hue += 360.0; } else hue = MANGROVE_HUE_MIN; hsl_to_rgb(hue, 0.9, 0.5, rgb); // if (j%NGRIDY == 0) printf("Circle %i, energy %.5lg, hue %.5lg\n", j, ej, hue); draw_colored_circle(mangrove[j].xc, mangrove[j].yc, mangrove[j].radius, NSEG, rgb); /* shrink mangrove */ if ((ERODE_MANGROVES)&&(ej > 0.0)) { mangrove[j].radius = mangrove[j].radius_initial*(1.0 - ej*ej/(MANGROVE_EMAX*MANGROVE_EMAX)); redraw = 1; } else mangrove[j].radius = mangrove[j].radius_initial; } else /* remove mangrove */ { mangrove[j].active = 0; /* reinitialize table xy_in */ redraw = 1; } } else if (RECOVER_MANGROVES) /* allow disabled mangroves to recover */ { mangrove[j].energy -= 0.15*dissip; printf("Circle %i energy %.3lg\n", j, mangrove[j].energy); if (mangrove[j].energy < 0.0) { printf("Reactivating circle %i?\n", j); /* THE PROBLEM occurs when circleactive[0] is set to 1 again */ if (j>0) mangrove[j].active = 1; mangrove[j].radius = mangrove[j].radius_initial; mangrove[j].energy = -MANGROVE_EMAX; /* reinitialize table xy_in */ redraw = 1; } } } /* for compatibility with draw_billiard, may be improvable */ for (j=0; j<ncircles; j++) { circles[j].xc = mangrove[j].xc; circles[j].yc = mangrove[j].yc; circles[j].radius = mangrove[j].radius; } /* compute energy dissipated in obstacles */ /* if (ERODE_MANGROVES) for (j=0; j<ncircles; j++) { // printf("j = %i\t", j); dissip = compute_dissipation(phi, psi, xy_in, mangrove[j].xc, mangrove[j].yc_wrapped); printf("dissip = %.3f\t", dissip); /* make sure the dissipation does not grow too fast because of round-off/blow-up */ // if (dissip > 0.1*MANGROVE_EMAX) // { // dissip = 0.1*MANGROVE_EMAX; // printf("Flooring dissipation!\n"); // } // // if (mangrove[j].active) // { // mangrove[j].energy += dissip; // ej = mangrove[j].energy; // printf("ej = %.3f\n", ej); // if (ej <= MANGROVE_EMAX) // { // if (ej > 0.0) // { // hue = MANGROVE_HUE_MIN + (MANGROVE_HUE_MAX - MANGROVE_HUE_MIN)*ej/MANGROVE_EMAX; // if (hue < 0.0) hue += 360.0; // } // else hue = MANGROVE_HUE_MIN; // hsl_to_rgb(hue, 0.9, 0.5, rgb); // if (j%NGRIDY == 0) printf("Circle %i, energy %.5lg, hue %.5lg\n", j, ej, hue); // draw_colored_circle(mangrove[j].xc, mangrove[j].yc, mangrove[j].radius, NSEG, rgb); // // /* shrink mangrove */ // if (ej > 0.0) // { // mangrove[j].radius -= MU*ej*ej/(MANGROVE_EMAX*MANGROVE_EMAX); // if (mangrove[j].radius < 0.0) mangrove[j].radius = 0.0; // mangrove[j].radius = mangrove[j].radius_initial*(1.0 - ej*ej/(MANGROVE_EMAX*MANGROVE_EMAX)); // redraw = 1; // } // else mangrove[j].radius = mangrove[j].radius_initial; // } // else /* remove mangrove */ // { // mangrove[j].active = 0; /* reinitialize table xy_in */ // redraw = 1; // } // } // else /* allow disabled mangroves to recover */ // { // mangrove[j].energy -= 0.15*dissip; // printf("ej = %.3f\n", mangrove[j].energy); // mangrove[j].radius += 0.005*MU; // if (mangrove[j].radius > MU) mangrove[j].radius = MU; // if ((mangrove[j].energy < 0.0)&&(mangrove[j].radius > 0.0)) // if (mangrove[j].energy < 0.0) // { // mangrove[j].active = 1; // mangrove[j].radius = mangrove[j].radius*(0.75 + 0.5*((double)rand()/RAND_MAX)); // mangrove[j].radius = mangrove[j].radius_initial; // mangrove[j].energy = -MANGROVE_EMAX; /* reinitialize table xy_in */ // redraw = 1; // } // } // printf("Circle %i, energy %.5lg\n", j, mangrove[j].energy); // } printf("Updating hashgrid\n"); if (REPELL_MANGROVES) update_hashgrid(mangrove, hashgrid_number, hashgrid_mangroves); printf("Drawing 
billiard\n"); draw_billiard(); glutSwapBuffers(); if (redraw) { printf("Reinitializing xy_in\n"); init_xyin_xrange(xy_in, imin, NX); // init_xyin_xrange(xy_in, imin, imax); } redraw = 0; if (MOVIE) { if (i >= INITIAL_TIME) save_frame(); else printf("Initial phase time %i of %i\n", i, INITIAL_TIME); /* it seems that saving too many files too fast can cause trouble with the file system */ /* so this is to make a pause from time to time - parameter PAUSE may need adjusting */ if (i % PAUSE == PAUSE - 1) { printf("Making a short pause\n"); sleep(PSLEEP); s = system("mv wave*.tif tif_wave/"); } } } if (MOVIE) { for (i=0; i<END_FRAMES; i++) save_frame(); s = system("mv wave*.tif tif_wave/"); } for (i=0; i<NX; i++) { free(phi[i]); free(psi[i]); free(phi_tmp[i]); free(psi_tmp[i]); free(xy_in[i]); } free(mangrove); free(hashgrid_number); free(hashgrid_mangroves); } void display(void) { glPushMatrix(); blank(); glutSwapBuffers(); blank(); glutSwapBuffers(); animation(); sleep(SLEEP2); glPopMatrix(); glutDestroyWindow(glutGetWindow()); } int main(int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH); glutInitWindowSize(WINWIDTH,WINHEIGHT); glutCreateWindow("Wave equation in a planar domain"); init(); glutDisplayFunc(display); glutMainLoop(); return 0; }
pado_unw_vec_para.h
/* * pado.h * * Created on: Sep 4, 2018 * Author: Zhen Peng */ #ifndef INCLUDES_PADO_UNW_PARA_VEC_H_ #define INCLUDES_PADO_UNW_PARA_VEC_H_ #include <vector> #include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> #include <xmmintrin.h> #include <immintrin.h> #include <bitset> #include <cmath> #include "globals.h" #include "avx512_pado.h" #include "graph.h" #include <omp.h> using std::vector; using std::unordered_map; using std::map; using std::bitset; using std::stable_sort; using std::min; using std::fill; namespace PADO { //inti NUM_THREADS = 4; //const inti BATCH_SIZE = 1024; // The size for regular batch and bit array. //const inti BITPARALLEL_SIZE = 50; //const inti THRESHOLD_PARALLEL = 80; //// AVX-512 constant variables //const inti NUM_P_INT = 16; //const __m512i INF_v = _mm512_set1_epi32(WEIGHTI_MAX); //const __m512i UNDEF_i32_v = _mm512_undefined_epi32(); //const __m512i LOWEST_BYTE_MASK = _mm512_set1_epi32(0xFF); //const __m128i INF_v_128i = _mm_set1_epi8(-1); //const inti NUM_P_BP_LABEL = 8; ////const inti REMAINDER_BP = BITPARALLEL_SIZE % NUM_P_BP_LABEL; ////const inti BOUND_BP_I = BITPARALLEL_SIZE - REMAINDER_BP; ////const __mmask8 IN_EPI64_M = (__mmask8) ((uint8_t) 0xFF >> (NUM_P_BP_LABEL - REMAINDER_BP)); ////const __mmask16 IN_128I_M = (__mmask16) ((uint16_t) 0xFFFF >> (NUM_P_INT - REMAINDER_BP)); //const __mmask16 FIRST_8_LANES = (__mmask16) ((uint16_t) 0xFFFF >> 8); //const __m512i INF_v_epi64 = _mm512_set1_epi64(WEIGHTI_MAX); //const __m512i ZERO_epi64_v = _mm512_set1_epi64(0); //const __m512i MINUS_2_epi64_v = _mm512_set1_epi64(-2); //const __m512i MINUS_1_epi64_v = _mm512_set1_epi64(-1); //// Batch based processing, 09/11/2018 template <inti BATCH_SIZE = 1024> class ParaVertexCentricPLLVec { private: static const inti BITPARALLEL_SIZE = 50; idi num_v_ = 0; const inti THRESHOLD_PARALLEL = 80; static const inti NUM_P_BP_LABEL = 8; static const inti REMAINDER_BP = BITPARALLEL_SIZE % NUM_P_BP_LABEL; static const inti BOUND_BP_I = BITPARALLEL_SIZE - REMAINDER_BP; static const __mmask8 IN_EPI64_M = (__mmask8) ((uint8_t) 0xFF >> (NUM_P_BP_LABEL - REMAINDER_BP)); static const __mmask16 IN_128I_M = (__mmask16) ((uint16_t) 0xFFFF >> (NUM_P_INT - REMAINDER_BP)); // Structure for the type of label struct IndexType { // struct Batch { // idi batch_id; // Batch ID // idi start_index; // Index to the array distances where the batch starts // inti size; // Number of distances element in this batch // // Batch(idi batch_id_, idi start_index_, inti size_): // batch_id(batch_id_), start_index(start_index_), size(size_) // { // ; // } // }; // // struct DistanceIndexType { // idi start_index; // Index to the array vertices where the same-ditance vertices start // inti size; // Number of the same-distance vertices // smalli dist; // The real distance // // DistanceIndexType(idi start_index_, inti size_, smalli dist_): // start_index(start_index_), size(size_), dist(dist_) // { // ; // } // }; smalli bp_dist[BITPARALLEL_SIZE]; // uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} uint64_t bp_sets_0[BITPARALLEL_SIZE]; uint64_t bp_sets_1[BITPARALLEL_SIZE]; // vector<Batch> batches; // Batch info // vector<DistanceIndexType> distances; // Distance info idi last_start = 0; // The start index of labels in the last iteration idi last_size = 0; // The size of labels in the last iteration vector<idi> vertices; // Vertices in the label, preresented as temperory ID vector<weighti> label_dists; // Used for SIMD, storing distances of labels. 
}; //__attribute__((aligned(64))); // Structure for the type of temporary label struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, when do initialization, only initialize those short_index[v] whose indicator[BATCH_SIZE] is set. bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already // bitset<BATCH_SIZE> candidates; // Candidates one iteration, candidates[r] is set means root r is candidate in this iteration // Use a queue to store candidates vector<inti> candidates_que = vector<inti>(BATCH_SIZE); inti end_candidates_que = 0; vector<uint8_t> is_candidate = vector<uint8_t>(BATCH_SIZE, 0); }; //__attribute__((aligned(64))); // Structure of the public ordered index for distance queries. struct IndexOrdered { weighti bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} vector<idi> label_id; vector<weighti> label_dists; }; vector<IndexType> L; vector<IndexOrdered> Index; // Ordered labels for original vertex ID void construct(const Graph &G); inline void bit_parallel_labeling( const Graph &G, vector<IndexType> &L, vector<uint8_t> &used_bp_roots); // inline void bit_parallel_labeling( // const Graph &G, // vector<IndexType> &L, // vector<bool> &used_bp_roots); inline void batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated); // inline void batch_process( // const Graph &G, // idi b_id, // idi root_start, // inti roots_size, // vector<IndexType> &L, // const vector<bool> &used_bp_roots); inline void initialize( vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots); inline void push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, idi &offset_tmp_candidate_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter); inline bool distance_query( idi cand_root_id, idi v_id, idi roots_start, const vector<IndexType> &L, const vector< vector<smalli> > &dist_matrix, smalli iter); inline void insert_label_only( idi cand_root_id, idi v_id, idi roots_start, inti roots_size, vector<IndexType> &L, vector< vector<smalli> > &dist_matrix, smalli iter); inline void update_label_indices( idi v_id, idi inserted_count, vector<IndexType> &L, vector<ShortIndex> &short_index, idi b_id, smalli iter); inline void 
    reset_at_end(
            idi roots_start,
            inti roots_size,
            vector<IndexType> &L,
            vector< vector<smalli> > &dist_matrix);

    // Some parallel interfaces
    inline idi prefix_sum_for_offsets(
            vector<idi> &offsets);
    template <typename T>
    inline void collect_into_queue(
            vector<T> &tmp_queue,
            vector<idi> &offsets_tmp_queue, // the start locations in tmp_queue for reading each group from tmp_queue
            vector<idi> &offsets_queue, // the locations in queue for writing into queue.
            idi num_elements, // total number of elements which need to be added from tmp_queue to queue
            vector<T> &queue,
            idi &end_queue);
    template <typename T, typename Int>
    inline void TS_enqueue(
            vector<T> &queue,
            Int &end_queue,
            const T &e);

    // Test only
//    uint64_t normal_hit_count = 0;
//    uint64_t bp_hit_count = 0;
//    uint64_t total_check_count = 0;
//    double initializing_time = 0;
//    double candidating_time = 0;
//    double adding_time = 0;
//    double distance_query_time = 0;
//    double init_index_time = 0;
//    double init_dist_matrix_time = 0;
//    double init_start_reset_time = 0;
//    double init_indicators_time = 0;
//    vector<double> thds_adding_time = vector<double>(80, 0.0);
//    vector<uint64_t> thds_adding_count = vector<uint64_t>(80, 0);
//    L2CacheMissRate cache_miss;
    // End test

public:
    ParaVertexCentricPLLVec() = default;
    ParaVertexCentricPLLVec(const Graph &G);

    weighti query(
            idi u,
            idi v);

    void print();
    void switch_labels_to_old_id(
            const vector<idi> &rank2id,
            const vector<idi> &rank);
    void store_index_to_file(
            const char *filename,
            const vector<idi> &rank);
    void load_index_from_file(
            const char *filename);
    void order_labels(
            const vector<idi> &rank2id,
            const vector<idi> &rank);
    weighti query_distance(
            idi a,
            idi b);
}; // class ParaVertexCentricPLLVec

template <inti BATCH_SIZE>
const inti ParaVertexCentricPLLVec<BATCH_SIZE>::BITPARALLEL_SIZE;

template <inti BATCH_SIZE>
ParaVertexCentricPLLVec<BATCH_SIZE>::ParaVertexCentricPLLVec(const Graph &G)
{
    construct(G);
}

template <inti BATCH_SIZE>
inline void ParaVertexCentricPLLVec<BATCH_SIZE>::bit_parallel_labeling(
        const Graph &G,
        vector<IndexType> &L,
        vector<uint8_t> &used_bp_roots) // CAS needs an array of raw integer types
{
    idi num_v = G.get_num_v();
    idi num_e = G.get_num_e();

    if (num_v <= BITPARALLEL_SIZE) {
//    if (true) {
        // Sequential version
        std::vector<weighti> tmp_d(num_v); // distances from the root to every v
        std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
        std::vector<idi> que(num_v); // active queue
        std::vector<std::pair<idi, idi> > sibling_es(num_e); // siblings, their distances to the root are equal (have a difference of 0)
        std::vector<std::pair<idi, idi> > child_es(num_e); // child and father, their distances to the root have a difference of 1.

        idi r = 0; // root r
        for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
            while (r < num_v && used_bp_roots[r]) {
                ++r;
            }
            if (r == num_v) {
                for (idi v = 0; v < num_v; ++v) {
                    L[v].bp_dist[i_bpspt] = SMALLI_MAX;
                }
                continue;
            }
            used_bp_roots[r] = 1;

            fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX);
            fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));

            idi que_t0 = 0, que_t1 = 0, que_h = 0;
            que[que_h++] = r;
            tmp_d[r] = 0;
            que_t1 = que_h;

            int ns = 0; // number of selected neighbors, at most 64
            // The edges of one vertex in G are ordered decreasingly by rank (lower rank first), so the edges need to be traversed backward here.
            // There was a bug that cost countless time: the unsigned iterator i might decrease to zero and then wrap around to INF.
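            // (Illustration, since idi is an unsigned type: with G.vertices[r] == 0,
            // the old backward loop kept below computed i_bound = 0 - 1, which wraps
            // around to the maximum idi value, so `i > i_bound` does not behave as
            // intended. Counting d_i upward and deriving i = i_start - d_i, as the
            // live code does, never underflows.)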
// idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; weighti td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; // tmp_s[v].second |= tmp_s[tv].first; // tmp_s[tv].second |= tmp_s[v].first; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; // tmp_s[tv].first |= tmp_s[v].first; // tmp_s[tv].second |= tmp_s[v].second; } } } for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; L[v].bp_sets_0[i_bpspt] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets_1[i_bpspt] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } else { // Parallel version: Naive parallel enqueue std::vector<weighti> tmp_d(num_v); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<idi> que(num_v); // active queue std::vector<std::pair<idi, idi> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<idi, idi> > child_es(num_e); // child and father, their distances to the root have difference of 1. idi r = 0; // root r for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { while (r < num_v && used_bp_roots[r]) { ++r; } if (r == num_v) { for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = SMALLI_MAX; } continue; } used_bp_roots[r] = 1; fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); idi que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; // number of selected neighbor, default 64 // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. 
// idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; weighti td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; // tmp_s[v].second |= tmp_s[tv].first; // tmp_s[tv].second |= tmp_s[v].first; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; // tmp_s[tv].first |= tmp_s[v].first; // tmp_s[tv].second |= tmp_s[v].second; } } } for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } #pragma omp parallel for for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; L[v].bp_sets_0[i_bpspt] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets_1[i_bpspt] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } } //inline void ParaVertexCentricPLLVec::bit_parallel_labeling( // const Graph &G, // vector<IndexType> &L, // vector<uint8_t> &used_bp_roots) //{ // idi num_v = G.get_num_v(); // idi num_e = G.get_num_e(); // //// std::vector<smalli> tmp_d(num_v); // distances from the root to every v // smalli *tmp_d = (smalli *) malloc(num_v * sizeof(smalli)); // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<idi> que(num_v); // active queue // std::vector< std::pair<idi, idi> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) // std::vector< std::pair<idi, idi> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
// // idi r = 0; // root r // for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // while (r < num_v && used_bp_roots[r]) { // ++r; // } // if (r == num_v) { // for (idi v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = SMALLI_MAX; // } // continue; // } // used_bp_roots[r] = 1; // //// fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); // memset(tmp_d, (uint8_t) -1, num_v * sizeof(smalli)); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // idi que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = r; // tmp_d[r] = 0; // que_t1 = que_h; // // int ns = 0; // number of selected neighbor, default 64 // // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) { // idi v = G.out_edges[i]; // if (!used_bp_roots[v]) { // used_bp_roots[v] = 1; // // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) // que[que_h++] = v; // tmp_d[v] = 1; // tmp_s[v].first = 1ULL << ns; // if (++ns == 64) break; // } // } // // for (smalli d = 0; que_t0 < que_h; ++d) { // idi num_sibling_es = 0, num_child_es = 0; // // // For parallel adding to que // idi que_size = que_t1 - que_t0; // vector<idi> offsets_tmp_queue(que_size); //#pragma omp parallel for // for (idi i_q = 0; i_q < que_size; ++i_q) { // offsets_tmp_queue[i_q] = G.out_degrees[que[que_t0 + i_q]]; // } // idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // vector<idi> tmp_que(num_neighbors); // vector<idi> sizes_tmp_que(que_size, 0); // // For parallel adding to sibling_es // vector< pair<idi, idi> > tmp_sibling_es(num_neighbors); // vector<idi> sizes_tmp_sibling_es(que_size, 0); // // For parallel adding to child_es // vector< pair<idi, idi> > tmp_child_es(num_neighbors); // vector<idi> sizes_tmp_child_es(que_size, 0); // //#pragma omp parallel for // for (idi que_i = que_t0; que_i < que_t1; ++que_i) { // idi tmp_que_i = que_i - que_t0; // location in the tmp_que // idi v = que[que_i]; // idi i_start = G.vertices[v]; // idi i_bound = i_start + G.out_degrees[v]; // for (idi i = i_start; i < i_bound; ++i) { // idi tv = G.out_edges[i]; // smalli td = d + 1; // // if (d > tmp_d[tv]) { // ; // } // else if (d == tmp_d[tv]) { // if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. 
// idi &size_in_group = sizes_tmp_sibling_es[tmp_que_i]; // tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; // tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; // ++size_in_group; //// sibling_es[num_sibling_es].first = v; //// sibling_es[num_sibling_es].second = tv; //// ++num_sibling_es; // } // } else { // d < tmp_d[tv] // if (tmp_d[tv] == SMALLI_MAX) { // if (CAS(tmp_d + tv, SMALLI_MAX, td)) { // tmp_d[tv] = td // tmp_que[offsets_tmp_queue[tmp_que_i] + sizes_tmp_que[tmp_que_i]++] = tv; // } // } //// if (tmp_d[tv] == SMALLI_MAX) { //// que[que_h++] = tv; //// tmp_d[tv] = td; //// } // idi &size_in_group = sizes_tmp_child_es[tmp_que_i]; // tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; // tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; // ++size_in_group; //// child_es[num_child_es].first = v; //// child_es[num_child_es].second = tv; //// ++num_child_es; // } // } // } // // // From tmp_sibling_es to sibling_es // idi total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_sibling_es); // collect_into_queue( // tmp_sibling_es, // offsets_tmp_queue, // sizes_tmp_sibling_es, // total_sizes_tmp_queue, // sibling_es, // num_sibling_es); // //#pragma omp parallel for // for (idi i = 0; i < num_sibling_es; ++i) { // idi v = sibling_es[i].first, w = sibling_es[i].second; // __sync_or_and_fetch(&tmp_s[v].second, tmp_s[w].first); // __sync_or_and_fetch(&tmp_s[w].second, tmp_s[v].first); //// tmp_s[v].second |= tmp_s[w].first; //// tmp_s[w].second |= tmp_s[v].first; // } // // // From tmp_child_es to child_es // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_child_es); // collect_into_queue( // tmp_child_es, // offsets_tmp_queue, // sizes_tmp_child_es, // total_sizes_tmp_queue, // child_es, // num_child_es); // //#pragma omp parallel for // for (idi i = 0; i < num_child_es; ++i) { // idi v = child_es[i].first, c = child_es[i].second; // __sync_or_and_fetch(&tmp_s[c].first, tmp_s[v].first); // __sync_or_and_fetch(&tmp_s[c].second, tmp_s[v].second); //// tmp_s[c].first |= tmp_s[v].first; //// tmp_s[c].second |= tmp_s[v].second; // } // // // From tmp_que to que // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_que); // collect_into_queue( // tmp_que, // offsets_tmp_queue, // sizes_tmp_que, // total_sizes_tmp_queue, // que, // que_h); // // que_t0 = que_t1; // que_t1 = que_h; // } // //#pragma omp parallel for // for (idi v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } // // free(tmp_d); //} // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template <inti BATCH_SIZE> inline void ParaVertexCentricPLLVec<BATCH_SIZE>::initialize( vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots) { idi roots_bound = roots_start + roots_size; // init_start_reset_time -= WallTimer::get_time_mark(); // TODO: parallel 
enqueue { // active_queue for (idi r_real_id = roots_start; r_real_id < roots_bound; ++r_real_id) { if (!used_bp_roots[r_real_id]) { active_queue[end_active_queue++] = r_real_id; } } } // init_start_reset_time += WallTimer::get_time_mark(); // init_index_time -= WallTimer::get_time_mark(); // Short_index { // init_indicators_time -= WallTimer::get_time_mark(); if (end_once_candidated_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } else { for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } //#pragma omp parallel for // for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // idi v = once_candidated_queue[v_i]; // short_index[v].indicator.reset(); // once_candidated[v] = 0; // } end_once_candidated_queue = 0; if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } else { for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } // for (idi v = roots_start; v < roots_bound; ++v) { // if (!used_bp_roots[v]) { // short_index[v].indicator.set(v - roots_start); // short_index[v].indicator.set(BATCH_SIZE); // v got labels // } // } // init_indicators_time += WallTimer::get_time_mark(); } // // Real Index { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; // Lr.batches.push_back(IndexType::Batch( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1)); // size // Lr.distances.push_back(IndexType::DistanceIndexType( // Lr.vertices.size(), // start_index // 1, // size // 0)); // dist // Lr.vertices.push_back(r_id); Lr.last_start = Lr.vertices.size(); Lr.last_size = Lr.last_start + 1; Lr.vertices.push_back(r_id + roots_start); Lr.label_dists.push_back(0); // Distance 0 } } else { for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; // Lr.batches.push_back(IndexType::Batch( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1)); // size // Lr.distances.push_back(IndexType::DistanceIndexType( // Lr.vertices.size(), // start_index // 1, // size // 0)); // dist // Lr.vertices.push_back(r_id); Lr.last_start = Lr.vertices.size(); Lr.last_size = Lr.last_start + 1; Lr.vertices.push_back(r_id + roots_start); Lr.label_dists.push_back(0); // Distance 0 } } // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // Lr.batches.push_back(IndexType::Batch( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1)); // size // Lr.distances.push_back(IndexType::DistanceIndexType( // Lr.vertices.size(), // start_index // 1, // size // 0)); // dist // Lr.vertices.push_back(r_id); // } } // init_index_time += WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); // Dist_matrix { if (roots_size >= THRESHOLD_PARALLEL) { // schedule 
dynamic is slower #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; // Traverse r's all labels idi bound_l_i = Lr.vertices.size(); for (idi l_i = 0; l_i < bound_l_i; ++l_i) { dist_matrix[r_id][Lr.vertices[l_i]] = Lr.label_dists[l_i]; } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; smalli dist; for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; // Traverse r's all labels idi bound_l_i = Lr.vertices.size(); for (idi l_i = 0; l_i < bound_l_i; ++l_i) { dist_matrix[r_id][Lr.vertices[l_i]] = Lr.label_dists[l_i]; } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // smalli dist; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // dist = Lr.distances[dist_i].dist; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; // } // } // } // } } // init_dist_matrix_time += WallTimer::get_time_mark(); } // Function that pushes v_head's labels to v_head's every neighbor template <inti BATCH_SIZE> inline void ParaVertexCentricPLLVec<BATCH_SIZE>::push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, idi &offset_tmp_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, // vector<idi> &once_candidated_queue, // idi &end_once_candidated_queue, vector<idi> &tmp_once_candidated_queue, idi &size_tmp_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter) { // const inti REMAINDER_BP = BITPARALLEL_SIZE % NUM_P_BP_LABEL; // const inti BOUND_BP_I = BITPARALLEL_SIZE - REMAINDER_BP; // const __mmask8 IN_EPI64_M = (__mmask8) ((uint8_t) 0xFF >> (NUM_P_BP_LABEL - REMAINDER_BP)); // const __mmask16 IN_128I_M = (__mmask16) ((uint16_t) 0xFFFF >> (NUM_P_INT - REMAINDER_BP)); __m512i iter_v = _mm512_set1_epi64(iter); __m512i iter_plus_2_v = _mm512_set1_epi64(iter + 2); const IndexType &Lv = L[v_head]; // These 2 index are used for traversing v_head's last inserted labels idi l_i_start = Lv.last_start; idi l_i_bound = Lv.last_size; // Traverse v_head's every neighbor v_tail idi e_i_start = G.vertices[v_head]; idi e_i_bound = e_i_start + G.out_degrees[v_head]; for (idi e_i = e_i_start; e_i < e_i_bound; ++e_i) { idi v_tail = G.out_edges[e_i]; if (used_bp_roots[v_head]) { continue; } if (v_tail < roots_start) 
{ // v_tail has higher rank than any roots, then no roots can push new labels to it. return; } // if (v_tail <= Lv.vertices[l_i_start] + roots_start) { // v_tail has higher rank than any v_head's labels // return; // } // This condition cannot be used anymore since v_head's last inserted labels are not ordered from higher rank to lower rank now, because v_head's candidate set is a queue now rather than a bitmap. For a queue, its order of candidates are not ordered by ranks. const IndexType &L_tail = L[v_tail]; _mm_prefetch(&L_tail.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_tail.bp_sets_0[0], _MM_HINT_T0); _mm_prefetch(&L_tail.bp_sets_1[0], _MM_HINT_T0); // _mm_prefetch(&L_tail.bp_sets[0][0], _MM_HINT_T0); // Traverse v_head's last inserted labels for (idi l_i = l_i_start; l_i < l_i_bound; ++l_i) { // inti label_root_id = Lv.vertices[l_i]; // idi label_real_id = label_root_id + roots_start; idi label_real_id = Lv.vertices[l_i]; idi label_root_id = label_real_id - roots_start; if (v_tail <= label_real_id) { // v_tail has higher rank than all remaining labels // For candidates_que, this is not true any more! // break; continue; } ShortIndex &SI_v_tail = short_index[v_tail]; if (SI_v_tail.indicator[label_root_id]) { // The label is already selected before continue; } // Record label_root_id as once selected by v_tail SI_v_tail.indicator.set(label_root_id); // Add into once_candidated_queue if (!once_candidated[v_tail]) { // If v_tail is not in the once_candidated_queue yet, add it in if (CAS(&once_candidated[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail; } } // CHANGED! // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // ++total_check_count; const IndexType &L_label = L[label_real_id]; bool no_need_add = false; _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_label.bp_sets_0[0], _MM_HINT_T0); _mm_prefetch(&L_label.bp_sets_1[0], _MM_HINT_T0); // _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); // Vectorization Version for (idi i = 0; i < BOUND_BP_I; i += NUM_P_BP_LABEL) { // Distance from label to BP root __m128i tmp_dist_l_r_128i = _mm_loadu_epi8(&L_label.bp_dist[i]); // @suppress("Function cannot be resolved") __m512i dist_l_r_v = _mm512_cvtepi8_epi64(tmp_dist_l_r_128i); // Distance from tail to BP root __m128i tmp_dist_t_r_128i = _mm_loadu_epi8(&L_tail.bp_dist[i]); // @suppress("Function cannot be resolved") __m512i dist_t_r_v = _mm512_cvtepi8_epi64(tmp_dist_t_r_128i); // BP-label distance from label to tail __m512i td_v = _mm512_add_epi64(dist_l_r_v, dist_t_r_v); // Compari BP-label dist to td_plus_2 __mmask8 is_bp_dist_shorter = _mm512_cmple_epi64_mask(td_v, iter_plus_2_v); if (is_bp_dist_shorter) { __m512i l_sets_0_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, is_bp_dist_shorter, &L_label.bp_sets_0[i]); // @suppress("Function cannot be resolved") __m512i t_sets_0_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, is_bp_dist_shorter, &L_tail.bp_sets_0[i]); // @suppress("Function cannot be resolved") __mmask8 can_both_reach_m = _mm512_mask_cmpneq_epi64_mask( is_bp_dist_shorter, ZERO_epi64_v, _mm512_mask_and_epi64(ZERO_epi64_v, is_bp_dist_shorter, l_sets_0_v, t_sets_0_v)); if (can_both_reach_m) { td_v = _mm512_mask_add_epi64(td_v, can_both_reach_m, td_v, MINUS_2_epi64_v); } else { __mmask8 compound_mask = is_bp_dist_shorter & (~can_both_reach_m); __m512i l_sets_1_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, compound_mask, &L_label.bp_sets_1[i]); // @suppress("Function cannot be 
resolved") __m512i t_sets_1_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, compound_mask, &L_tail.bp_sets_1[i]); // @suppress("Function cannot be resolved") __mmask8 can_single_reach_m = _mm512_mask_cmpneq_epi64_mask( compound_mask, ZERO_epi64_v, _mm512_mask_or_epi64( ZERO_epi64_v, compound_mask, _mm512_mask_and_epi64(ZERO_epi64_v, compound_mask, l_sets_0_v, t_sets_1_v), _mm512_mask_and_epi64(ZERO_epi64_v, compound_mask, l_sets_1_v, t_sets_0_v))); if (can_single_reach_m) { td_v = _mm512_mask_add_epi64(td_v, can_single_reach_m, td_v, MINUS_1_epi64_v); } } if (_mm512_mask_cmple_epi64_mask(is_bp_dist_shorter, td_v, iter_v)) { no_need_add = true; break; } } } // if (true) { // // __mmask8 in_epi64_m = (__mmask8) ((uint8_t) 0xFF >> (NUM_P_BP_LABEL - remainder_simd)); // // __mmask16 in_128i_m = (__mmask16) ((uint16_t) 0xFFFF >> (NUM_P_INT - remainder_simd)); // // Distance from label to BP root // __m128i tmp_dist_l_r_128i = _mm_mask_loadu_epi8(INF_v_128i, IN_128I_M, &L_label.bp_dist[BOUND_BP_I]); // __m512i dist_l_r_v = _mm512_mask_cvtepi8_epi64(INF_v_epi64, IN_EPI64_M, tmp_dist_l_r_128i); // // Distance from tail to BP root // __m128i tmp_dist_t_r_128i = _mm_mask_loadu_epi8(INF_v_128i, IN_128I_M, &L_tail.bp_dist[BOUND_BP_I]); // __m512i dist_t_r_v = _mm512_mask_cvtepi8_epi64(INF_v_epi64, IN_EPI64_M, tmp_dist_t_r_128i); // // BP-label distance from label to tail // __m512i td_v = _mm512_mask_add_epi64(INF_v_epi64, IN_EPI64_M, dist_l_r_v, dist_t_r_v); // // Compari BP-label dist to td_plus_2 // __mmask8 is_bp_dist_shorter = _mm512_mask_cmple_epi64_mask(IN_EPI64_M, td_v, iter_plus_2_v); // if (is_bp_dist_shorter) { // __m512i l_sets_0_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, is_bp_dist_shorter, &L_label.bp_sets_0[BOUND_BP_I]); // @suppress("Function cannot be resolved") // __m512i t_sets_0_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, is_bp_dist_shorter, &L_tail.bp_sets_0[BOUND_BP_I]); // @suppress("Function cannot be resolved") // // __mmask8 can_both_reach_m = _mm512_mask_cmpneq_epi64_mask( // is_bp_dist_shorter, // ZERO_epi64_v, // _mm512_mask_and_epi64(ZERO_epi64_v, is_bp_dist_shorter, l_sets_0_v, t_sets_0_v)); // if (can_both_reach_m) { // td_v = _mm512_mask_add_epi64(td_v, can_both_reach_m, td_v, MINUS_2_epi64_v); // } else { // __mmask8 compound_mask = is_bp_dist_shorter & (~can_both_reach_m); // __m512i l_sets_1_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, compound_mask, &L_label.bp_sets_1[BOUND_BP_I]); // @suppress("Function cannot be resolved") // __m512i t_sets_1_v = _mm512_mask_loadu_epi64(ZERO_epi64_v, compound_mask, &L_tail.bp_sets_1[BOUND_BP_I]); // @suppress("Function cannot be resolved") // __mmask8 can_single_reach_m = _mm512_mask_cmpneq_epi64_mask( // compound_mask, // ZERO_epi64_v, // _mm512_mask_or_epi64( // ZERO_epi64_v, // compound_mask, // _mm512_mask_and_epi64(ZERO_epi64_v, compound_mask, l_sets_0_v, t_sets_1_v), // _mm512_mask_and_epi64(ZERO_epi64_v, compound_mask, l_sets_1_v, t_sets_0_v))); // if (can_single_reach_m) { // td_v = _mm512_mask_add_epi64(td_v, can_single_reach_m, td_v, MINUS_1_epi64_v); // } // } // if (_mm512_mask_cmple_epi64_mask(is_bp_dist_shorter, td_v, iter_v)) { // no_need_add = true; // ++bp_hit_count; // // This is one of the most incredibly egregiously ugly bugs ever! 02/05/2019 // // break; // The break should never be here, as it is in a if-condition statement rather than a for-loop. 
// } // } // } for (inti i = BOUND_BP_I; i < BITPARALLEL_SIZE; ++i) { inti td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets_0[i] & L_tail.bp_sets_0[i]) ? -2 : ((L_label.bp_sets_0[i] & L_tail.bp_sets_1[i]) | (L_label.bp_sets_1[i] & L_tail.bp_sets_0[i])) ? -1 : 0; if (td <= iter) { no_need_add = true; // break; } } } // // Sequential Version // for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { // inti td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets_0[i] & L_tail.bp_sets_0[i]) ? -2 : // ((L_label.bp_sets_0[i] & L_tail.bp_sets_1[i]) | // (L_label.bp_sets_1[i] & L_tail.bp_sets_0[i])) // ? -1 : 0; // if (td <= iter) { // no_need_add = true; // break; // } // } // } if (no_need_add) { continue; } // Record vertex label_root_id as v_tail's candidates label // SI_v_tail.candidates.set(label_root_id); // if (!SI_v_tail.is_candidate[label_root_id]) { // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // } if (!SI_v_tail.is_candidate[label_root_id]) { if (CAS(&SI_v_tail.is_candidate[label_root_id], (uint8_t) 0, (uint8_t) 1)) { TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id); // volatile inti old_v = SI_v_tail.end_candidates_que; // volatile inti new_v = old_v + 1; // while (!CAS(&SI_v_tail.end_candidates_que, old_v, new_v)) { // old_v = SI_v_tail.end_candidates_que; // new_v = old_v + 1; // } // SI_v_tail.candidates_que[old_v] = label_root_id; } } // Add into candidate_queue if (!got_candidates[v_tail]) { // If v_tail is not in candidate_queue, add it in (prevent duplicate) if (CAS(&got_candidates[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_candidate_queue[offset_tmp_queue + size_tmp_candidate_queue++] = v_tail; } } // // Add into once_candidated_queue //#pragma omp critical // if (!once_candidated[v_tail]) { // // If v_tail is not in the once_candidated_queue yet, add it in // once_candidated[v_tail] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail; // } // // Add into candidate_queue // if (!got_candidates[v_tail]) { // // If v_tail is not in candidate_queue, add it in (prevent duplicate) // got_candidates[v_tail] = true; // candidate_queue[end_candidate_queue++] = v_tail; // } } } // printf("v_head: %u, size_tmp_candidate_queue: %u\n", v_head, size_tmp_candidate_queue);//test } // Function for distance query; // traverse vertex v_id's labels; // return the distance between v_id and cand_root_id based on existing labels. // return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label. 
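// (A small worked example of the rule, with hypothetical numbers: at iteration
// iter == 3, candidate c is checked against an existing label (r, d(v,r)) of v.
// If d(v,r) == 1 and the batch's distance buffer gives d(r,c) == 2, then
// d(v,r) + d(r,c) == 3 <= iter, so the path v -> r -> c already certifies the
// distance that the new label (c, 3) would record, and distance_query() returns
// false. Only labels with d(v,r) < iter and a root ranked higher than c
// (r_real_id < cand_real_id) can prune; the masked SIMD code below evaluates
// this test for 16 labels at a time.)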
template <inti BATCH_SIZE> inline bool ParaVertexCentricPLLVec<BATCH_SIZE>::distance_query( idi cand_root_id, idi v_id, idi roots_start, const vector<IndexType> &L, const vector< vector<smalli> > &dist_matrix, smalli iter) { // ++total_check_count; // distance_query_time -= WallTimer::get_time_mark(); idi cand_real_id = cand_root_id + roots_start; __m512i cand_real_id_v = _mm512_set1_epi32(cand_real_id); // For vectorized version __m512i iter_v = _mm512_set1_epi32(iter); const IndexType &Lv = L[v_id]; // Traverse v_id's all existing labels // Vectorization Version inti remainder_simd = Lv.last_size % NUM_P_INT; idi bound_l_i = Lv.last_size - remainder_simd; for (idi l_i = 0; l_i < bound_l_i; l_i += NUM_P_INT) { // Distances __m128i tmp_dist_v_r_128i = _mm_loadu_epi8(&Lv.label_dists[l_i]); // @suppress("Function cannot be resolved") __m512i dist_v_r_v = _mm512_cvtepi8_epi32(tmp_dist_v_r_128i); __mmask16 is_v_r_shorter = _mm512_cmplt_epi32_mask(dist_v_r_v, iter_v); if (!is_v_r_shorter) { continue; } // IDs __m512i r_real_id_v = _mm512_mask_loadu_epi32(UNDEF_i32_v, is_v_r_shorter, &Lv.vertices[l_i]); // @suppress("Function cannot be resolved") __mmask16 is_r_higher_ranked_m = _mm512_mask_cmplt_epi32_mask(is_v_r_shorter, r_real_id_v, cand_real_id_v); if (!is_r_higher_ranked_m) { continue; } // Distance from r to c __m512i dist_r_c_v = _mm512_mask_i32gather_epi32(INF_v, is_r_higher_ranked_m, r_real_id_v, &dist_matrix[cand_root_id][0], sizeof(weighti)); dist_r_c_v = _mm512_mask_and_epi32(INF_v, is_r_higher_ranked_m, dist_r_c_v, LOWEST_BYTE_MASK); // Query distance from v to c __m512i d_tmp_v = _mm512_mask_add_epi32(INF_v, is_r_higher_ranked_m, dist_v_r_v, dist_r_c_v); __mmask16 is_query_shorter = _mm512_mask_cmple_epi32_mask(is_r_higher_ranked_m, d_tmp_v, iter_v); if (is_query_shorter) { return false; } } if (remainder_simd) { __mmask16 in_m = (__mmask16) ((uint16_t) 0xFFFF >> (NUM_P_INT - remainder_simd)); // Distances __m128i tmp_dist_v_r_128i = _mm_mask_loadu_epi8(INF_v_128i, in_m, &Lv.label_dists[bound_l_i]); // @suppress("Function cannot be resolved") __m512i dist_v_r_v = _mm512_mask_cvtepi8_epi32(INF_v, in_m, tmp_dist_v_r_128i); __mmask16 is_v_r_shorter = _mm512_mask_cmplt_epi32_mask(in_m, dist_v_r_v, iter_v); if (is_v_r_shorter) { // IDs __m512i r_real_id_v = _mm512_mask_loadu_epi32(UNDEF_i32_v, is_v_r_shorter, &Lv.vertices[bound_l_i]); // @suppress("Function cannot be resolved") __mmask16 is_r_higher_ranked_m = _mm512_mask_cmplt_epi32_mask(is_v_r_shorter, r_real_id_v, cand_real_id_v); if (is_r_higher_ranked_m) { // Distance from r to c !!!!! 
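                // (Why the AND after the gather: dist_matrix stores one-byte
                // distances, but _mm512_mask_i32gather_epi32 fetches 32 bits
                // starting at each byte address, so the three high bytes of every
                // lane are neighboring entries; masking with LOWEST_BYTE_MASK keeps
                // only the addressed byte, relying on little-endian layout.)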
                __m512i dist_r_c_v = _mm512_mask_i32gather_epi32(INF_v, is_r_higher_ranked_m, r_real_id_v, &dist_matrix[cand_root_id][0], sizeof(weighti));
                dist_r_c_v = _mm512_mask_and_epi32(INF_v, is_r_higher_ranked_m, dist_r_c_v, LOWEST_BYTE_MASK);
                // Query distance from v to c
                __m512i d_tmp_v = _mm512_mask_add_epi32(INF_v, is_r_higher_ranked_m, dist_v_r_v, dist_r_c_v);
                __mmask16 is_query_shorter = _mm512_mask_cmple_epi32_mask(is_r_higher_ranked_m, d_tmp_v, iter_v);
                if (is_query_shorter) {
                    return false;
                }
            }
        }
    }

//    // Sequential Version
//    idi bound_l_i = Lv.last_size;
//    for (idi l_i = 0; l_i < bound_l_i; ++l_i) {
//        weighti dist_v_r = Lv.label_dists[l_i];
//        if (dist_v_r >= iter) {
//            continue;
//        }
//        idi r_real_id = Lv.vertices[l_i];
//        if (cand_real_id <= r_real_id) {
//            continue;
//        }
//        inti d_tmp = dist_v_r + dist_matrix[cand_root_id][r_real_id];
//        if (d_tmp <= iter) {
//            return false;
//        }
//    }

//    distance_query_time += WallTimer::get_time_mark();
    return true;
}

// Function that inserts candidate cand_root_id into vertex v_id's labels
// and updates the distance buffer dist_matrix if v_id is a root of this batch;
// note it only updates the vertices array of v_id's labels (label_dists is filled in later by update_label_indices).
template <inti BATCH_SIZE>
inline void ParaVertexCentricPLLVec<BATCH_SIZE>::insert_label_only(
        idi cand_root_id,
        idi v_id,
        idi roots_start,
        inti roots_size,
        vector<IndexType> &L,
        vector< vector<smalli> > &dist_matrix,
        smalli iter)
{
    L[v_id].vertices.push_back(cand_root_id + roots_start);
    // Update the distance buffer if necessary
    idi v_root_id = v_id - roots_start;
    if (v_id >= roots_start && v_root_id < roots_size) {
        dist_matrix[v_root_id][cand_root_id + roots_start] = iter;
    }
}

// Function that updates the index arrays in v_id's label; it is only called if new labels were inserted for v_id
template <inti BATCH_SIZE>
inline void ParaVertexCentricPLLVec<BATCH_SIZE>::update_label_indices(
        idi v_id,
        idi inserted_count,
        vector<IndexType> &L,
        vector<ShortIndex> &short_index,
        idi b_id,
        smalli iter)
{
    IndexType &Lv = L[v_id];
//    // indicator[BATCH_SIZE] is true, meaning v got some labels already in this batch
//    if (short_index[v_id].indicator[BATCH_SIZE]) {
//        // Increase the batches' last element's size because a new distance element needs to be added
//        ++(Lv.batches.rbegin() -> size);
//    } else {
//        short_index[v_id].indicator.set(BATCH_SIZE);
//        // Insert a new Batch with batch_id, start_index, and size because a new distance element needs to be added
//        Lv.batches.push_back(IndexType::Batch(
//                                    b_id,
//                                    Lv.distances.size(),
//                                    1));
//    }
//    // Insert a new distance element with start_index, size, and dist
//    Lv.distances.push_back(IndexType::DistanceIndexType(
//                                        Lv.vertices.size() - inserted_count,
//                                        inserted_count,
//                                        iter));
    Lv.last_start = Lv.last_size;
    Lv.last_size += inserted_count;
    for (idi i = 0; i < inserted_count; ++i) {
        Lv.label_dists.push_back(iter);
    }
}

// Function to reset the distance buffer dist_matrix to INF.
// Traverse every root's labels and reset only those distance buffer elements to INF;
// in this way the cost of initialization for the next batch is reduced.
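// (Cost sketch of the design choice above: a blanket reset would touch
// roots_size * num_v entries every batch, e.g.
//     for (idi r_id = 0; r_id < roots_size; ++r_id)
//         fill(dist_matrix[r_id].begin(), dist_matrix[r_id].end(), WEIGHTI_MAX);
// while resetting only the entries named by each root's own labels costs
// O(total number of the roots' labels), which is typically far smaller.)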
template <inti BATCH_SIZE> inline void ParaVertexCentricPLLVec<BATCH_SIZE>::reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector< vector<smalli> > &dist_matrix) { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; // Traverse r's all labels idi bound_l_i = Lr.vertices.size(); for (idi l_i = 0; l_i < bound_l_i; ++l_i) { dist_matrix[r_id][Lr.vertices[l_i]] = WEIGHTI_MAX; } } } else { for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; // Traverse r's all labels idi bound_l_i = Lr.vertices.size(); for (idi l_i = 0; l_i < bound_l_i; ++l_i) { dist_matrix[r_id][Lr.vertices[l_i]] = WEIGHTI_MAX; } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; // } // } // } // } } template <inti BATCH_SIZE> inline void ParaVertexCentricPLLVec<BATCH_SIZE>::batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated) //inline void ParaVertexCentricPLLVec::batch_process( // const Graph &G, // idi b_id, // idi roots_start, // start id of roots // inti roots_size, // how many roots in the batch // vector<IndexType> &L, // const vector<bool> &used_bp_roots) { // initializing_time -= WallTimer::get_time_mark(); // static const idi num_v = G.get_num_v(); // static vector<idi> active_queue(num_v); // static idi end_active_queue = 0; // static vector<idi> candidate_queue(num_v); // static idi end_candidate_queue = 0; // static vector<ShortIndex> short_index(num_v); // static vector< vector<smalli> > dist_matrix(roots_size, vector<smalli>(num_v, SMALLI_MAX)); // static uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // static uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // static vector<idi> once_candidated_queue(num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. // static idi end_once_candidated_queue = 0; // static uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. 
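    // (Why uint8_t rather than bool for these flags: vector<bool> is bit-packed,
    // so its elements have no individually addressable storage for a
    // compare-and-swap; byte-sized flags give every element its own address.
    // A minimal sketch, assuming CAS() wraps a builtin compare-and-swap as it is
    // used throughout this file:
    //     if (CAS(&once_candidated[v], (uint8_t) 0, (uint8_t) 1)) {
    //         // this thread, and only this thread, claimed v
    //     }
    // )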
// At the beginning of a batch, initialize the labels L and distance buffer dist_matrix; // printf("initializing...\n");//test initialize( short_index, dist_matrix, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, b_id, roots_start, roots_size, L, used_bp_roots); smalli iter = 0; // The iterator, also the distance for current iteration // initializing_time += WallTimer::get_time_mark(); while (0 != end_active_queue) { // candidating_time -= WallTimer::get_time_mark(); ++iter; // Pushing // printf("pushing...\n");//test { // Prepare for parallel processing the active_queue and adding to candidate_queue. // Every vertex's offset location in tmp_candidate_queue // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_active_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. offsets_tmp_queue[i_queue] = G.out_degrees[active_queue[i_queue]]; } idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_candidate_queue(num_neighbors); // A vector to store the true number of pushed neighbors of every active vertex. vector<idi> sizes_tmp_candidate_queue(end_active_queue, 0); // similarly, every thread writes to tmp_once_candidated_queue at its offset location vector<idi> tmp_once_candidated_queue(num_neighbors); // And store the true number of new added once-candidated vertices. vector<idi> sizes_tmp_once_candidated_queue(end_active_queue, 0); // Traverse active vertices to push their labels as candidates // schedule dynamic is slower #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { idi v_head = active_queue[i_queue]; is_active[v_head] = 0; // reset is_active push_labels( v_head, roots_start, G, L, short_index, // candidate_queue, // end_candidate_queue, tmp_candidate_queue, sizes_tmp_candidate_queue[i_queue], offsets_tmp_queue[i_queue], got_candidates, // once_candidated_queue, // end_once_candidated_queue, tmp_once_candidated_queue, sizes_tmp_once_candidated_queue[i_queue], once_candidated, used_bp_roots, iter); } // According to sizes_tmp_candidate_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_candidate_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_candidate_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_candidate_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue candidate_queue, end_candidate_queue); // Get the offset for inserting to the real queue. 
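            // (prefix_sum_for_offsets() is an in-place exclusive prefix sum: e.g.
            // per-group sizes [2, 0, 3] become write offsets [0, 2, 2] and the
            // call returns the total 5, so every group can copy its elements into
            // a disjoint slice of the destination queue without synchronization.)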
            total_new = prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
            // Collect all once-candidated vertices from tmp_once_candidated_queue into once_candidated_queue
            collect_into_queue(
                    tmp_once_candidated_queue,
                    offsets_tmp_queue,
                    sizes_tmp_once_candidated_queue,
                    total_new,
                    once_candidated_queue,
                    end_once_candidated_queue);
//            printf("end_candidate_queue: %u\n", end_candidate_queue); fflush(stdout);//test
            end_active_queue = 0; // Set the active_queue empty
        }
//        candidating_time += WallTimer::get_time_mark();
        if (end_candidate_queue == 0) {
            break;
        }
//        adding_time -= WallTimer::get_time_mark();
        // Adding
//        printf("adding...\n");//test
        {
            // Prepare for parallel processing of the candidate_queue and adding to the active_queue.
            // Every vertex's offset location in tmp_active_queue; it's used for every thread to write into tmp_active_queue.
            vector<idi> offsets_tmp_queue(end_candidate_queue);
#pragma omp parallel for
            for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) {
                // Traverse all candidate vertices to set their offsets.
                // A ridiculous bug was here: each v_id can add at most itself to the active queue,
                // so its offset is simply i_queue, not i_queue * roots_size.
                //offsets_tmp_queue[i_queue] = i_queue * roots_size;
                offsets_tmp_queue[i_queue] = i_queue;
            }
            // every thread writes to tmp_active_queue at its offset location
            vector<idi> tmp_active_queue(end_candidate_queue);
            // A vector to store the true number of vertices each group adds to the active queue (0 or 1 here).
            vector<idi> sizes_tmp_active_queue(end_candidate_queue, 0);

            // Traverse vertices in the candidate_queue to insert labels
            // Here schedule dynamic will be slower
#pragma omp parallel for schedule(dynamic)
            for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) {
                idi v_id = candidate_queue[i_queue];
                inti inserted_count = 0; // recording the number of v_id's truly inserted candidates
                got_candidates[v_id] = 0; // reset got_candidates
                bool be_active = false; // flag; if be_active is true, v_id needs to be marked as active (enqueued into active_queue)
                // Traverse v_id's all candidates
//                for (inti cand_root_id = 0; cand_root_id < roots_size; ++cand_root_id) {
//                    if (!short_index[v_id].candidates[cand_root_id]) {
//                        // Root cand_root_id is not vertex v_id's candidate
//                        continue;
//                    }
//                    short_index[v_id].candidates.reset(cand_root_id);
                inti bound_cand_i = short_index[v_id].end_candidates_que;
                for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                    inti cand_root_id = short_index[v_id].candidates_que[cand_i];
                    short_index[v_id].is_candidate[cand_root_id] = 0; // Reset is_candidate
                    // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than the existing distance
                    if ( distance_query(
                            cand_root_id,
                            v_id,
                            roots_start,
                            L,
                            dist_matrix,
                            iter) ) {
                        if (!be_active) {
                            be_active = true;
                        }
//                        if (!is_active[v_id]) {
//                            is_active[v_id] = true;
//                            active_queue[end_active_queue++] = v_id;
//                        }
                        ++inserted_count;
                        // The candidate cand_root_id needs to be added into v_id's label
                        insert_label_only(
                                cand_root_id,
                                v_id,
                                roots_start,
                                roots_size,
                                L,
                                dist_matrix,
                                iter);
                    }
                }
                short_index[v_id].end_candidates_que = 0;
//                }
                if (be_active) {
                    if (CAS(&is_active[v_id], (uint8_t) 0, (uint8_t) 1)) {
                        tmp_active_queue[offsets_tmp_queue[i_queue] + sizes_tmp_active_queue[i_queue]++] = v_id;
                    }
                }
                if (0 != inserted_count) {
                    // Update other arrays in L[v_id] if new labels were inserted in this iteration
                    update_label_indices(
                            v_id,
                            inserted_count,
                            L,
                            short_index,
                            b_id,
                            iter);
                }
            }
            // According to sizes_tmp_active_queue, get the offset for inserting to the real queue
idi total_new = prefix_sum_for_offsets(sizes_tmp_active_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_active_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_active_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue active_queue, end_active_queue); end_candidate_queue = 0; // Set the candidate_queue empty } // adding_time += WallTimer::get_time_mark(); } // Reset the dist_matrix // initializing_time -= WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); reset_at_end( roots_start, roots_size, L, dist_matrix); // init_dist_matrix_time += WallTimer::get_time_mark(); // initializing_time += WallTimer::get_time_mark(); // double total_time = time_can + time_add; // printf("Candidating time: %f (%f%%)\n", time_can, time_can / total_time * 100); // printf("Adding time: %f (%f%%)\n", time_add, time_add / total_time * 100); } template <inti BATCH_SIZE> void ParaVertexCentricPLLVec<BATCH_SIZE>::construct(const Graph &G) { // initializing_time -= WallTimer::get_time_mark(); idi num_v = G.get_num_v(); num_v_ = num_v; L.resize(num_v); idi remainer = num_v % BATCH_SIZE; idi b_i_bound = num_v / BATCH_SIZE; // uint8_t *used_bp_roots = (uint8_t *) calloc(num_v, sizeof(uint8_t)); vector<uint8_t> used_bp_roots(num_v, 0); vector<idi> active_queue(num_v); idi end_active_queue = 0; vector<idi> candidate_queue(num_v); idi end_candidate_queue = 0; vector<ShortIndex> short_index(num_v); vector< vector<smalli> > dist_matrix(BATCH_SIZE, vector<smalli>(num_v, SMALLI_MAX)); // uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> got_candidates(num_v, 0); vector<uint8_t> is_active(num_v, 0); vector<idi> once_candidated_queue(num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. idi end_once_candidated_queue = 0; // uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. 
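    // (Footprint note for the buffers above: dist_matrix dominates with
    // BATCH_SIZE * num_v entries, roughly one byte per vertex per root of a
    // batch assuming smalli is a one-byte type; the other buffers are O(num_v).)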
vector<uint8_t> once_candidated(num_v, 0); // initializing_time += WallTimer::get_time_mark(); double time_labeling = -WallTimer::get_time_mark(); // double bp_labeling_time = -WallTimer::get_time_mark(); // printf("BP labeling...\n"); //test bit_parallel_labeling( G, L, used_bp_roots); // bp_labeling_time += WallTimer::get_time_mark(); for (idi b_i = 0; b_i < b_i_bound; ++b_i) { // printf("b_i: %u\n", b_i);//test batch_process( G, b_i, b_i * BATCH_SIZE, BATCH_SIZE, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i, // b_i * BATCH_SIZE, // BATCH_SIZE, // L, // used_bp_roots); } if (remainer != 0) { // printf("b_i: %u the last batch\n", b_i_bound);//test batch_process( G, b_i_bound, b_i_bound * BATCH_SIZE, remainer, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i_bound, // b_i_bound * BATCH_SIZE, // remainer, // L, // used_bp_roots); } time_labeling += WallTimer::get_time_mark(); // free(got_candidates); // free(is_active); // free(once_candidated); // free(used_bp_roots); // Test printf("Threads: %u Batch_size: %u\n", NUM_THREADS, BATCH_SIZE); // printf("BP_labeling: %.2f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100); printf("BP_Roots_Size: %u\n", BITPARALLEL_SIZE); // printf("Initializing: %.2f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100); // printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100); // printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100); // printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100); // printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100); // printf("Candidating: %.2f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100); // printf("Adding: %.2f %.2f%%\n", adding_time, adding_time / time_labeling * 100); // printf("\tdistance_query_time: %f (%f%%)\n", distance_query_time, distance_query_time / adding_time * 100); // printf("\ttotal_check_count: %llu\n", total_check_count); // printf("\tbp_hit_count (to total_check): %llu (%f%%)\n", // bp_hit_count, // bp_hit_count * 100.0 / total_check_count); // printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n", // normal_hit_count, // normal_hit_count * 100.0 / total_check_count, // normal_hit_count * 100.0 / (total_check_count - bp_hit_count)); printf("Total_labeling_time: %.2f seconds\n", time_labeling); // End test } // Function to get the prefix sum of elements in offsets template <inti BATCH_SIZE> inline idi ParaVertexCentricPLLVec<BATCH_SIZE>::prefix_sum_for_offsets( vector<idi> &offsets) { idi size_offsets = offsets.size(); if (1 == size_offsets) { idi tmp = offsets[0]; offsets[0] = 0; return tmp; } else if (size_offsets < 2048) { idi offset_sum = 0; idi size = size_offsets; for (idi i = 0; i < size; ++i) { idi tmp = offsets[i]; offsets[i] = offset_sum; offset_sum += tmp; } return offset_sum; } else { // Parallel Prefix Sum, based on Guy E. 
Blelloch's Prefix Sums and Their Applications idi last_element = offsets[size_offsets - 1]; // idi size = 1 << ((idi) log2(size_offsets - 1) + 1); idi size = 1 << ((idi) log2(size_offsets)); // vector<idi> nodes(size, 0); idi tmp_element = offsets[size - 1]; //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // nodes[i] = offsets[i]; // } // Up-Sweep (Reduce) Phase idi log2size = log2(size); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; } } // Down-Sweep Phase offsets[size - 1] = 0; for (idi d = log2(size) - 1; d != (idi) -1 ; --d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { idi t = offsets[k + (1 << d) - 1]; offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; offsets[k + (1 << (d + 1)) - 1] += t; } } //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // offsets[i] = nodes[i]; // } if (size != size_offsets) { idi tmp_sum = offsets[size - 1] + tmp_element; for (idi i = size; i < size_offsets; ++i) { idi t = offsets[i]; offsets[i] = tmp_sum; tmp_sum += t; } } return offsets[size_offsets - 1] + last_element; } // // Get the offset as the prefix sum of out degrees // idi offset_sum = 0; // idi size = offsets.size(); // for (idi i = 0; i < size; ++i) { // idi tmp = offsets[i]; // offsets[i] = offset_sum; // offset_sum += tmp; // } // return offset_sum; //// Parallel Prefix Sum, based on Guy E. Blelloch's Prefix Sums and Their Applications // idi size_offsets = offsets.size(); // idi last_element = offsets[size_offsets - 1]; //// idi size = 1 << ((idi) log2(size_offsets - 1) + 1); // idi size = 1 << ((idi) log2(size_offsets)); //// vector<idi> nodes(size, 0); // idi tmp_element = offsets[size - 1]; ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// nodes[i] = offsets[i]; //// } // // // Up-Sweep (Reduce) Phase // idi log2size = log2(size); // for (idi d = 0; d < log2size; ++d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; // } // } // // // Down-Sweep Phase // offsets[size - 1] = 0; // for (idi d = log2(size) - 1; d != (idi) -1 ; --d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // idi t = offsets[k + (1 << d) - 1]; // offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; // offsets[k + (1 << (d + 1)) - 1] += t; // } // } // ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// offsets[i] = nodes[i]; //// } // if (size != offsets.size()) { // idi tmp_sum = offsets[size - 1] + tmp_element; // idi i_bound = offsets.size(); // for (idi i = size; i < i_bound; ++i) { // idi t = offsets[i]; // offsets[i] = tmp_sum; // tmp_sum += t; // } // } // // return offsets[size_offsets - 1] + last_element; } // Collect elements in the tmp_queue into the queue template <inti BATCH_SIZE> template <typename T> inline void ParaVertexCentricPLLVec<BATCH_SIZE>::collect_into_queue( // vector<idi> &tmp_queue, vector<T> &tmp_queue, vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue vector<idi> &offsets_queue, // the locations in queue for writing into queue. 
idi num_elements, // total number of elements which need to be added from tmp_queue to queue // vector<idi> &queue, vector<T> &queue, idi &end_queue) { if (0 == num_elements) { return; } idi i_bound = offsets_tmp_queue.size(); #pragma omp parallel for for (idi i = 0; i < i_bound; ++i) { idi i_q_start = end_queue + offsets_queue[i]; idi i_q_bound; if (i_bound - 1 != i) { i_q_bound = end_queue + offsets_queue[i + 1]; } else { i_q_bound = end_queue + num_elements; } if (i_q_start == i_q_bound) { // If the group has no elements to be added, then continue to the next group continue; } idi end_tmp = offsets_tmp_queue[i]; for (idi i_q = i_q_start; i_q < i_q_bound; ++i_q) { queue[i_q] = tmp_queue[end_tmp++]; } } end_queue += num_elements; } // Function: thread-save enqueue. The queue has enough size already. An index points the end of the queue. template <inti BATCH_SIZE> template <typename T, typename Int> inline void ParaVertexCentricPLLVec<BATCH_SIZE>::TS_enqueue( vector<T> &queue, Int &end_queue, const T &e) { volatile Int old_i = end_queue; volatile Int new_i = old_i + 1; while (!CAS(&end_queue, old_i, new_i)) { old_i = end_queue; new_i = old_i + 1; } queue[old_i] = e; } template <inti BATCH_SIZE> void ParaVertexCentricPLLVec<BATCH_SIZE>::store_index_to_file( const char *filename, const vector<idi> &rank) { ofstream fout(filename); if (!fout.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } // Store into file the number of vertices and the number of bit-parallel roots. uint64_t labels_count = 0; fout.write((char *) &num_v_, sizeof(num_v_)); fout.write((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); for (idi v_id = 0; v_id < num_v_; ++v_id) { idi v_rank = rank[v_id]; const IndexType &Lv = L[v_rank]; idi size_labels = Lv.vertices.size(); labels_count += size_labels; // Store Bit-parallel Labels into file. for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { weighti d = Lv.bp_dist[b_i]; uint64_t s0 = Lv.bp_sets_0[b_i]; uint64_t s1 = Lv.bp_sets_1[b_i]; fout.write((char *) &d, sizeof(d)); fout.write((char *) &s0, sizeof(s0)); fout.write((char *) &s1, sizeof(s1)); } vector< std::pair<idi, weighti> > ordered_labels; // Traverse v_id's all existing labels for (idi l_i = 0; l_i < size_labels; ++l_i) { ordered_labels.push_back(std::make_pair(Lv.vertices[l_i], Lv.label_dists[l_i])); } // Sort sort(ordered_labels.begin(), ordered_labels.end()); // Store into file fout.write((char *) &size_labels, sizeof(size_labels)); for (idi l_i = 0; l_i < size_labels; ++l_i) { idi l = ordered_labels[l_i].first; weighti d = ordered_labels[l_i].second; fout.write((char *) &l, sizeof(l)); fout.write((char *) &d, sizeof(d)); } } printf("Label_size: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v_); fout.close(); } template <inti BATCH_SIZE> void ParaVertexCentricPLLVec<BATCH_SIZE>::load_index_from_file( const char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi num_v; // Load from file the number of vertices and the number of bit-parallel roots. fin.read((char *) &num_v, sizeof(num_v)); fin.read((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE)); num_v_ = num_v; Index.resize(num_v); uint64_t labels_count = 0; // Load labels for every vertex for (idi v_id = 0; v_id < num_v; ++v_id) { IndexOrdered &Iv = Index[v_id]; // Load Bit-parallel Labels from file. 
for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { fin.read((char *) &Iv.bp_dist[b_i], sizeof(Iv.bp_dist[b_i])); fin.read((char *) &Iv.bp_sets[b_i][0], sizeof(Iv.bp_sets[b_i][0])); fin.read((char *) &Iv.bp_sets[b_i][1], sizeof(Iv.bp_sets[b_i][1])); } // Normal Labels // Load Labels from file. idi size_labels; fin.read((char *) &size_labels, sizeof(size_labels)); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); Iv.label_dists.resize(size_labels + 1); for (idi l_i = 0; l_i < size_labels; ++l_i) { fin.read((char *) &Iv.label_id[l_i], sizeof(Iv.label_id[l_i])); fin.read((char *) &Iv.label_dists[l_i], sizeof(Iv.label_dists[l_i])); } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = (weighti) -1; // Sentinel } printf("Label_size_loaded: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); fin.close(); } template <inti BATCH_SIZE> void ParaVertexCentricPLLVec<BATCH_SIZE>::order_labels( const vector<idi> &rank2id, const vector<idi> &rank) { idi num_v = rank.size(); vector< vector< pair<idi, weighti> > > ordered_L(num_v); idi labels_count = 0; Index.resize(num_v); // Traverse the L, put them into Index (ordered labels) for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; IndexOrdered & Iv = Index[new_v]; const IndexType &Lv = L[v_id]; auto &OLv = ordered_L[new_v]; // Bit-parallel Labels memcpy(&Iv.bp_dist, &Lv.bp_dist, BITPARALLEL_SIZE * sizeof(weighti)); for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { Iv.bp_sets[b_i][0] = Lv.bp_sets_0[b_i]; Iv.bp_sets[b_i][1] = Lv.bp_sets_1[b_i]; } // Normal Labels // Traverse v_id's all existing labels for (idi l_i = 0; l_i < Lv.vertices.size(); ++l_i) { OLv.push_back(make_pair(Lv.vertices[l_i], Lv.label_dists[l_i])); } // Sort sort(OLv.begin(), OLv.end()); // Store into Index inti size_labels = OLv.size(); labels_count += size_labels; Iv.label_id.resize(size_labels + 1); // Adding one for Sentinel Iv.label_dists.resize(size_labels + 1); // Adding one for Sentinel for (inti l_i = 0; l_i < size_labels; ++l_i) { Iv.label_id[l_i] = OLv[l_i].first; Iv.label_dists[l_i] = OLv[l_i].second; } Iv.label_id[size_labels] = num_v; // Sentinel Iv.label_dists[size_labels] = WEIGHTI_MAX; // Sentinel } printf("Label_size: %u mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v); // // Test // { // puts("Asserting..."); // for (idi v_id = 0; v_id < num_v; ++v_id) { // const IndexType &Lv = L[v_id]; // const IndexOrdered &Iv = Index[rank2id[v_id]]; // // Bit-parallel Labels // for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) { // assert(Lv.bp_dist[b_i] == Iv.bp_dist[b_i]); // assert(Lv.bp_sets[b_i][0] == Iv.bp_sets[b_i][0]); // assert(Lv.bp_sets[b_i][1] == Iv.bp_sets[b_i][1]); // } // // Normal Labels // assert(Lv.vertices.size() == Iv.label_id.size()); // assert(Lv.vertices.size() == Iv.label_dists.size()); //// { //// inti bound_i = Iv.label_id.size() > 10 ? 10 : Iv.label_id.size(); //// printf("V %u:", rank2id[v_id]); //// for (inti i = 0; i < bound_i; ++i) { //// printf(" (%u, %u)", Iv.label_id[i], Iv.label_dists[i]); //// } //// puts(""); //// } // // } // puts("Asserted."); // } } template <inti BATCH_SIZE> weighti ParaVertexCentricPLLVec<BATCH_SIZE>::query_distance( idi a, idi b) { idi num_v = num_v_; if (a >= num_v || b >= num_v) { return a == b ? 0 : WEIGHTI_MAX; } // // A is shorter than B // IndexOrdered &Ia = (Index[a].label_id.size() < Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() < Index[b].label_id.size()) ? 
Index[b] : Index[a]; // // A is longer than B // IndexOrdered &Ia = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[a] : Index[b]; // IndexOrdered &Ib = (Index[a].label_id.size() > Index[b].label_id.size()) ? Index[b] : Index[a]; IndexOrdered &Ia = Index[a]; IndexOrdered &Ib = Index[b]; // const IndexOrdered &Ia = Index[a]; // const IndexOrdered &Ib = Index[b]; inti d = WEIGHTI_MAX; _mm_prefetch(&Ia.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_id[0], _MM_HINT_T0); _mm_prefetch(&Ia.label_dists[0], _MM_HINT_T0); _mm_prefetch(&Ib.label_dists[0], _MM_HINT_T0); // Bit-Parallel Labels for (int i = 0; i < BITPARALLEL_SIZE; ++i) { int td = Ia.bp_dist[i] + Ib.bp_dist[i]; if (td - 2 <= d) { td += (Ia.bp_sets[i][0] & Ib.bp_sets[i][0]) ? -2 : ((Ia.bp_sets[i][0] & Ib.bp_sets[i][1]) | (Ia.bp_sets[i][1] & Ib.bp_sets[i][0])) ? -1 : 0; if (td < d) { d = td; } } } // Normal Labels (ordered) // // Vectorizaed Version // vector<idi> &A = Ia.label_id; // vector<idi> &B = Ib.label_id; // idi len_B = B.size() - 1; //// idi len_B = B.size(); // idi bound_b_base_i = len_B - (len_B % NUM_P_INT); // idi a_i = 0; // idi b_base_i = 0; // idi len_A = A.size() - 1; //// idi len_A = A.size(); // ++length_larger_than_16.second; // if (len_B >= 16) { // ++length_larger_than_16.first; // } // while (a_i < len_A && b_base_i < bound_b_base_i) { // int a = A[a_i]; // __m512i a_v = _mm512_set1_epi32(a); // // // Packed b // __m512i b_v = _mm512_loadu_epi32(&B[b_base_i]); // @suppress("Function cannot be resolved") // __mmask16 is_equal_m = _mm512_cmpeq_epi32_mask(a_v, b_v); // if (is_equal_m) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i + (idi) (log2(is_equal_m))]; // if (td < d) { // d = td; // } // // // Advance index // if (is_equal_m & (__mmask16) 0x8000) { // ++a_i; // b_base_i += NUM_P_INT; // } else { // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } else { // // Advance index // a_i += (a < B[b_base_i + NUM_P_INT - 1]) ? 1 : 0; // b_base_i += (B[b_base_i + NUM_P_INT - 1] < a) ? NUM_P_INT : 0; // } // } // while (a_i < len_A && b_base_i < len_B) { // if (A[a_i] == B[b_base_i]) { //// if (a == num_v) { //// break; // Sentinel //// } // inti td = Ia.label_dists[a_i] + Ib.label_dists[b_base_i]; // if (td < d) { // d = td; // } // // // Advance index // ++a_i; // ++b_base_i; // } else { // // Advance index // a_i += (A[a_i] < B[b_base_i]) ? 1 : 0; // b_base_i += (B[b_base_i] < A[a_i]) ? 1 : 0; // } // } // Sequential Version for (idi i1 = 0, i2 = 0; ; ) { idi v1 = Ia.label_id[i1], v2 = Ib.label_id[i2]; if (v1 == v2) { if (v1 == num_v) { break; // Sentinel } inti td = Ia.label_dists[i1] + Ib.label_dists[i2]; if (td < d) { d = td; } ++i1; ++i2; } else { i1 += v1 < v2 ? 1 : 0; i2 += v1 > v2 ? 
1 : 0; } } if (d >= WEIGHTI_MAX - 2) { d = WEIGHTI_MAX; } return d; } template <inti BATCH_SIZE> void ParaVertexCentricPLLVec<BATCH_SIZE>::switch_labels_to_old_id( const vector<idi> &rank2id, const vector<idi> &rank) { idi label_sum = 0; idi test_label_sum = 0; // idi num_v = rank2id.size(); idi num_v = rank.size(); vector< vector< pair<idi, weighti> > > new_L(num_v); // for (idi r = 0; r < num_v; ++r) { // idi v = rank2id[r]; // const IndexType &Lr = L[r]; // IndexType &Lv = new_L[v]; // idi size = Lr.get_size(); // label_sum += size; // for (idi li = 0; li < size; ++li) { // idi l = Lr.get_label_ith_v(li); // idi new_l = rank2id[l]; // Lv.add_label_seq(new_l, Lr.get_label_ith_d(li)); // } // } // L = new_L; for (idi v_id = 0; v_id < num_v; ++v_id) { idi new_v = rank2id[v_id]; const IndexType &Lv = L[v_id]; // Traverse v_id's all existing labels label_sum += Lv.vertices.size(); for (idi l_i = 0; l_i < Lv.vertices.size(); ++l_i) { idi tail = Lv.vertices[l_i]; new_L[new_v].push_back(make_pair(tail, Lv.label_dists[l_i])); ++test_label_sum; } } printf("Label_sum: %u %u mean: %f\n", label_sum, test_label_sum, label_sum * 1.0 / num_v); // // Try to print // for (idi v = 0; v < num_v; ++v) { // const auto &Lv = new_L[v]; // idi size = Lv.size(); // printf("Vertex %u (Size %u):", v, size); // for (idi i = 0; i < size; ++i) { // printf(" (%u, %d)", Lv[i].first, Lv[i].second); // fflush(stdout); // } // puts(""); // } // // Try query // idi u; // idi v; // while (std::cin >> u >> v) { // weighti dist = WEIGHTI_MAX; // // Bit Parallel Check // const IndexType &idx_u = L[rank[u]]; // const IndexType &idx_v = L[rank[v]]; // // for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { // int td = idx_v.bp_dist[i] + idx_u.bp_dist[i]; // if (td - 2 <= dist) { // td += // (idx_v.bp_sets_0[i] & idx_u.bp_sets_0[i]) ? -2 : // ((idx_v.bp_sets_0[i] & idx_u.bp_sets_1[i]) // | (idx_v.bp_sets_1[i] & idx_u.bp_sets_0[i])) // ? -1 : 0; // if (td < dist) { // dist = td; // } // } // } // // // Normal Index Check // const auto &Lu = new_L[u]; // const auto &Lv = new_L[v]; //// unsorted_map<idi, weighti> markers; // map<idi, weighti> markers; // for (idi i = 0; i < Lu.size(); ++i) { // markers[Lu[i].first] = Lu[i].second; // } // for (idi i = 0; i < Lv.size(); ++i) { // const auto &tmp_l = markers.find(Lv[i].first); // if (tmp_l == markers.end()) { // continue; // } // int d = tmp_l->second + Lv[i].second; // if (d < dist) { // dist = d; // } // } // if (dist == 255) { // printf("2147483647\n"); // } else { // printf("%u\n", dist); // } // } } } #endif /* INCLUDES_PADO_H_ */
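/*
 * Illustrative aside (not part of the PADO sources above): query_distance()
 * resolves a query with a sentinel-terminated merge join over two label
 * arrays sorted by hub id.  The sketch below is a minimal, self-contained
 * model of that loop; NUM_V, INF_D, and merge_join_distance are hypothetical
 * stand-ins for num_v_, WEIGHTI_MAX, and the real member function.
 */
#include <stdio.h>

#define NUM_V 6     /* hypothetical vertex count; doubles as the sentinel id */
#define INF_D 255   /* hypothetical "unreachable" distance */

static int merge_join_distance(const unsigned *ids_a, const unsigned char *dists_a,
                               const unsigned *ids_b, const unsigned char *dists_b)
{
    int d = INF_D;
    /* Both label arrays end with (NUM_V, INF_D), so no bounds checks are
     * needed: the loop exits when both cursors reach the sentinel. */
    for (unsigned i1 = 0, i2 = 0; ; ) {
        unsigned v1 = ids_a[i1], v2 = ids_b[i2];
        if (v1 == v2) {
            if (v1 == NUM_V) break;          /* both cursors at the sentinel */
            int td = dists_a[i1] + dists_b[i2];
            if (td < d) d = td;
            ++i1; ++i2;
        } else {                             /* advance the smaller hub id */
            i1 += (v1 < v2) ? 1 : 0;
            i2 += (v2 < v1) ? 1 : 0;
        }
    }
    return d;
}

int main(void)
{
    /* Labels of two vertices as (hub id, distance) pairs plus sentinels. */
    const unsigned      ids_a[]   = {0, 2, 5, NUM_V};
    const unsigned char dists_a[] = {1, 3, 2, INF_D};
    const unsigned      ids_b[]   = {0, 3, 5, NUM_V};
    const unsigned char dists_b[] = {2, 1, 1, INF_D};
    /* Common hubs are 0 (1+2=3) and 5 (2+1=3), so the answer is 3. */
    printf("d = %d\n", merge_join_distance(ids_a, dists_a, ids_b, dists_b));
    return 0;
}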
GB_binop__atan2_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__atan2_fp64 // A.*B function (eWiseMult): GB_AemultB__atan2_fp64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__atan2_fp64 // C+=b function (dense accum): GB_Cdense_accumb__atan2_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__atan2_fp64 // C=scalar+B GB_bind1st__atan2_fp64 // C=scalar+B' GB_bind1st_tran__atan2_fp64 // C=A+scalar GB_bind2nd__atan2_fp64 // C=A'+scalar GB_bind2nd_tran__atan2_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = atan2 (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = atan2 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN2 || GxB_NO_FP64 || GxB_NO_ATAN2_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__atan2_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__atan2_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__atan2_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__atan2_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
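/*
 * Illustrative aside (not part of the auto-generated file): the functions
 * above and below only fix the types and the GB_* macros; the actual loops
 * live in the included GB_*_template.c files, which are written purely
 * against those macros.  Below is a hypothetical miniature of that pattern
 * as a standalone program: DEMO_CTYPE, DEMO_BINOP, and demo_ewise are
 * invented names, and demo_ewise plays the role of an included template body.
 */
#include <math.h>
#include <stdio.h>

#define DEMO_CTYPE double
#define DEMO_BINOP(z,x,y) z = atan2 (x, y) ;

/* stand-in for #include "GB_..._template.c": a generic loop that knows the
 * operator only through the DEMO_* macros (the macro supplies the ";") */
static void demo_ewise (DEMO_CTYPE *Cx, const DEMO_CTYPE *Ax,
                        const DEMO_CTYPE *Bx, long n)
{
    for (long p = 0 ; p < n ; p++)
    {
        DEMO_BINOP (Cx [p], Ax [p], Bx [p])
    }
}

int main (void)
{
    DEMO_CTYPE A [2] = { 1.0, 0.0 }, B [2] = { 1.0, 1.0 }, C [2] ;
    demo_ewise (C, A, B, 2) ;
    printf ("%g %g\n", C [0], C [1]) ;   /* prints 0.785398 0 */
    return (0) ;
}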
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__atan2_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__atan2_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double bij = Bx [p] ;
        Cx [p] = atan2 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__atan2_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        Cx [p] = atan2 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = atan2 (x, aij) ;      \
}

GrB_Info GB_bind1st_tran__atan2_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
    double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    double aij = Ax [pA] ;          \
    Cx [pC] = atan2 (aij, y) ;      \
}

GrB_Info GB_bind2nd_tran__atan2_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
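/*
 * Illustrative aside (not part of the auto-generated file): bind1st fixes
 * the first operand of z = atan2(x,y) to a scalar and maps over the array,
 * while bind2nd fixes the second.  This is a minimal, self-contained model
 * of GB_bind1st__atan2_fp64 and GB_bind2nd__atan2_fp64 above (which
 * additionally parallelize the loop with OpenMP); demo_bind1st and
 * demo_bind2nd are invented names.
 */
#include <math.h>
#include <stdio.h>

static void demo_bind1st (double *Cx, double x, const double *Bx, long n)
{
    for (long p = 0 ; p < n ; p++) Cx [p] = atan2 (x, Bx [p]) ;
}

static void demo_bind2nd (double *Cx, const double *Ax, double y, long n)
{
    for (long p = 0 ; p < n ; p++) Cx [p] = atan2 (Ax [p], y) ;
}

int main (void)
{
    double A [3] = { 0.0, 1.0, -1.0 }, C1 [3], C2 [3] ;
    demo_bind1st (C1, 1.0, A, 3) ;   /* C1 [p] = atan2 (1, A [p]) */
    demo_bind2nd (C2, A, 1.0, 3) ;   /* C2 [p] = atan2 (A [p], 1) */
    for (int p = 0 ; p < 3 ; p++) printf ("%g %g\n", C1 [p], C2 [p]) ;
    return (0) ;
}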
cloudkeychain_fmt_plug.c
/* 1Password Cloud Keychain cracker patch for JtR. Hacked together during * April of 2013 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru.kholia at gmail.com>, * Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> and Copyright (c) 2012 * magnum, and it is hereby released to the general public under the following * terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This software is based on "onepasswordpy" project but no actual code is * borrowed from it. * * "onepasswordpy" project is at https://github.com/Roguelazer/onepasswordpy */ #if FMT_EXTERNS_H extern struct fmt_main fmt_cloud_keychain; #elif FMT_REGISTERS_H john_register_one(&fmt_cloud_keychain); #else #include <stdint.h> #include <string.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "sha2.h" #include "pbkdf2_hmac_sha512.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "cloudkeychain" #define FORMAT_NAME "1Password Cloud Keychain" #define FORMAT_TAG "$cloudkeychain$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #ifdef SIMD_COEF_64 #define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define HASH_LENGTH 64 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define PLAINTEXT_LENGTH 111 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SALTLEN 32 #define IVLEN 16 #define CTLEN 2048 #define EHMLEN 32 #define PAD_SIZE 128 static struct fmt_tests cloud_keychain_tests[] = { 
{"$cloudkeychain$16$2e57e8b57eda4d99df2fe02324960044$227272$336$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$256$16$881d65af6b863f6678d484ff551bc843$272$a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b$32$6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$304$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b", "fred"}, // https://cache.agilebits.com/security-kb/freddy-2013-12-04.tar.gz, This is a sample OPVault file. The Master Password for it is freddy. 
{"$cloudkeychain$16$3f4a4e30c37a3b0e7020a38e4ac69242$50000$336$6f706461746130310001000000000000237c26e13beb237a85b8eacc4bddd111a7bb7bee7cf71f019df9268cb3751d563d1bebf0331e7def4c26eeb90e61d2c2339b3c2d23ce75e969f250a1be823732823687950be19722f2dc92f02e614352c082d04358c421c1ddc90d07d8c6c9fb46255846ef950f14547e5b72b32a0e64cf3d24646d41b7fdd57534a1dd808d15e8dfe4299ef7ee8a3e923dc28496504cacb0be647a4600797ade6cb41694c2eb4d41b674ce762d66e98895fde98dda862b84720874b09b080b50ef9514b4ea0e3a19f5d51ccb8850cd26623e56dadef2bcbc625194dd107f663a7548f991803075874ecc4fc98b785b4cd56c3ce9bcb23ccf70f1908fc85a5b9520cd20d9d26a3bfb29ac289c1262302c82f6b0877d566369b98fb551fb9d044434c4cb1c50dcb5bb5a07ad0315fd9742d7d0edc9b9ed685bfa76978e228fdaa237dae4152731$256$16$237c26e13beb237a85b8eacc4bddd111$272$a7bb7bee7cf71f019df9268cb3751d563d1bebf0331e7def4c26eeb90e61d2c2339b3c2d23ce75e969f250a1be823732823687950be19722f2dc92f02e614352c082d04358c421c1ddc90d07d8c6c9fb46255846ef950f14547e5b72b32a0e64cf3d24646d41b7fdd57534a1dd808d15e8dfe4299ef7ee8a3e923dc28496504cacb0be647a4600797ade6cb41694c2eb4d41b674ce762d66e98895fde98dda862b84720874b09b080b50ef9514b4ea0e3a19f5d51ccb8850cd26623e56dadef2bcbc625194dd107f663a7548f991803075874ecc4fc98b785b4cd56c3ce9bcb23ccf70f1908fc85a5b9520cd20d9d26a3bfb29ac289c1262302c82f6b0877d566369b98fb551fb9d044434c4cb1c50dc$32$b5bb5a07ad0315fd9742d7d0edc9b9ed685bfa76978e228fdaa237dae4152731$304$6f706461746130310001000000000000237c26e13beb237a85b8eacc4bddd111a7bb7bee7cf71f019df9268cb3751d563d1bebf0331e7def4c26eeb90e61d2c2339b3c2d23ce75e969f250a1be823732823687950be19722f2dc92f02e614352c082d04358c421c1ddc90d07d8c6c9fb46255846ef950f14547e5b72b32a0e64cf3d24646d41b7fdd57534a1dd808d15e8dfe4299ef7ee8a3e923dc28496504cacb0be647a4600797ade6cb41694c2eb4d41b674ce762d66e98895fde98dda862b84720874b09b080b50ef9514b4ea0e3a19f5d51ccb8850cd26623e56dadef2bcbc625194dd107f663a7548f991803075874ecc4fc98b785b4cd56c3ce9bcb23ccf70f1908fc85a5b9520cd20d9d26a3bfb29ac289c1262302c82f6b0877d566369b98fb551fb9d044434c4cb1c50dc", "freddy"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned int saltlen; unsigned char salt[SALTLEN]; unsigned int iterations; unsigned int masterkeylen; unsigned char masterkey[CTLEN]; unsigned int plaintextlen; unsigned int ivlen; unsigned char iv[32]; unsigned int cryptextlen; unsigned char cryptext[CTLEN]; unsigned int expectedhmaclen; unsigned char expectedhmac[EHMLEN]; unsigned int hmacdatalen; unsigned char hmacdata[CTLEN]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int len, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "$")) == NULL) /* salt length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (hexlenl(p, &extra)/2 != len || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iterations */ goto err; if (!isdecu(p)) goto err; if 
((p = strtokm(NULL, "$")) == NULL) /* masterkey length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if ((p = strtokm(NULL, "$")) == NULL) /* masterkey */ goto err; if (hexlenl(p, &extra)/2 != len || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* plaintext length */ goto err; if (!isdecu(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iv length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if (len > IVLEN) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iv */ goto err; if (hexlenl(p, &extra) / 2 != len || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* cryptext length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if (len > CTLEN) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* cryptext */ goto err; if (hexlenl(p, &extra)/2 != len || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* expectedhmac length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if (len > EHMLEN) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* expectedhmac */ goto err; if (hexlenl(p, &extra)/2 != len || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* hmacdata length */ goto err; if (!isdec(p)) goto err; len = atoi(p); if (len > CTLEN) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* hmacdata */ goto err; if (hexlenl(p, &extra)/2 != len || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; /* skip over "$cloudkeychain$" */ p = strtokm(ctcopy, "$"); cs.saltlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.saltlen; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); cs.iterations = atou(p); p = strtokm(NULL, "$"); cs.masterkeylen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.masterkeylen; i++) cs.masterkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); cs.plaintextlen = atou(p); p = strtokm(NULL, "$"); cs.ivlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.ivlen; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); cs.cryptextlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.cryptextlen; i++) cs.cryptext[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); cs.expectedhmaclen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.expectedhmaclen; i++) cs.expectedhmac[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "$"); cs.hmacdatalen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.hmacdatalen; i++) cs.hmacdata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void hmac_sha256(uint8_t * pass, uint8_t passlen, uint8_t * salt, uint32_t saltlen, uint32_t add, uint64_t * ret) { uint8_t i, ipad[64], opad[64]; SHA256_CTX ctx; memset(ipad, 0x36, 64); memset(opad, 0x5c, 64); for (i = 0; i < passlen; i++) { ipad[i] ^= pass[i]; opad[i] ^= pass[i]; } SHA256_Init(&ctx); SHA256_Update(&ctx, ipad, 64); SHA256_Update(&ctx, salt, saltlen); if (add > 0) { #if ARCH_LITTLE_ENDIAN add = JOHNSWAP(add); #endif SHA256_Update(&ctx, &add, 4); } SHA256_Final((uint8_t *) ret, &ctx); SHA256_Init(&ctx); 
SHA256_Update(&ctx, opad, 64); SHA256_Update(&ctx, (uint8_t *) ret, 32); SHA256_Final((uint8_t *) ret, &ctx); } static int ckcdecrypt(unsigned char *key) { uint64_t tmp[8]; hmac_sha256(key + 32, 32, cur_salt->hmacdata, cur_salt->hmacdatalen, 0, tmp); if (!memcmp(tmp, cur_salt->expectedhmac, 32)) return 1; else return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char *pin[SSE_GROUP_SZ_SHA512]; uint64_t key[SSE_GROUP_SZ_SHA512][8]; union { uint32_t *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = (uint32_t*)(key[i]); } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0); for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) cracked[index+i] = ckcdecrypt((unsigned char*)(key[i])); #else uint64_t key[8]; pbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, (unsigned char*)key, HASH_LENGTH, 0); cracked[index] = ckcdecrypt((unsigned char*)key); #endif } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void cloud_keychain_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } struct fmt_main fmt_cloud_keychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { "iteration count", }, { FORMAT_TAG }, cloud_keychain_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, cloud_keychain_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
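/*
 * Illustrative aside (not part of the JtR sources above): valid() and
 * get_salt() decode every "$"-separated hex field with JtR's atoi16[]
 * lookup table (cs.salt[i] = atoi16[hi]*16 + atoi16[lo]).  Below is a
 * hypothetical portable equivalent of that per-byte decode; hexval and
 * decode_hex are invented names.
 */
#include <stdio.h>

static int hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Decode len bytes from 2*len hex digits; returns 0 on success. */
static int decode_hex(unsigned char *out, const char *hex, unsigned len)
{
    unsigned i;
    for (i = 0; i < len; i++) {
        int hi = hexval(hex[2 * i]), lo = hexval(hex[2 * i + 1]);
        if (hi < 0 || lo < 0) return -1;
        out[i] = (unsigned char)(hi * 16 + lo);
    }
    return 0;
}

int main(void)
{
    /* First four bytes of the salt in the first test vector above. */
    unsigned char salt[4];
    if (decode_hex(salt, "2e57e8b5", 4) == 0)
        printf("%02x %02x %02x %02x\n", salt[0], salt[1], salt[2], salt[3]);
    return 0;
}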
DRB057-jacobiinitialize-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Use of private() clause */ #include <stdio.h> #include <math.h> #define MSIZE 200 #include <omp.h> int n = 200; int m = 200; double alpha = 0.0543; double u[200][200]; double f[200][200]; double uold[200][200]; double dx; double dy; void initialize() { int i; int j; int xx; int yy; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); /* Initialize initial condition and RHS */ #pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { #pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy) for (j = 0; j <= m - 1; j += 1) { /* -1 < x < 1 */ xx = ((int )(- 1.0 + dx * (i - 1))); /* -1 < y < 1 */ yy = ((int )(- 1.0 + dy * (j - 1))); u[i][j] = 0.0; f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)); } } } int main() { initialize(); int i; int j; for (i = 0; i <= n - 1; i += 1) { for (j = 0; j <= m - 1; j += 1) { printf("%lf %lf\n",u[i][j],f[i][j]); } } return 0; }
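/*
 * Illustrative aside (not part of the DataRaceBench file above): the kernel
 * stays race-free only because the scratch variables xx and yy appear in the
 * private() clause, so each thread gets its own copy.  A minimal,
 * self-contained sketch of the same idiom, with an invented scratch
 * variable tmp:
 */
#include <stdio.h>
#include <omp.h>

#define N 8

int main(void)
{
    double out[N];
    int i;
    int tmp; /* shared by default; privatized in the clause below */
    #pragma omp parallel for private(tmp)
    for (i = 0; i < N; i += 1) {
        tmp = i * i;        /* safe: each thread writes its own tmp */
        out[i] = tmp + 1.0;
    }
    for (i = 0; i < N; i += 1)
        printf("%lf\n", out[i]);
    return 0;
}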
GB_unaryop__minv_uint32_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint32_uint8 // op(A') function: GB_tran__minv_uint32_uint8 // C type: uint32_t // A type: uint8_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 32) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 32) ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint32_uint8 ( uint32_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint32_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
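/*
 * Illustrative aside (not part of the auto-generated file): GB_CAST_OP above
 * first widens aij (uint8_t to uint32_t, the GB_CASTING step) and only then
 * applies the unary operator to the widened value.  The sketch below models
 * that two-step kernel as a standalone program; the zero-input behaviour is
 * a placeholder, since the exact convention of GB_IMINV_UNSIGNED is defined
 * in GB.h and not reproduced here.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void demo_cast_unop (uint32_t *Cx, const uint8_t *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t x = (uint32_t) Ax [p] ;    /* the GB_CASTING step */
        Cx [p] = (x == 0) ? 0 : (1u / x) ;  /* placeholder integer minv */
    }
}

int main (void)
{
    uint8_t A [3] = { 1, 2, 0 } ;
    uint32_t C [3] ;
    demo_cast_unop (C, A, 3) ;
    printf ("%" PRIu32 " %" PRIu32 " %" PRIu32 "\n",
        C [0], C [1], C [2]) ;              /* prints 1 0 0 */
    return (0) ;
}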
task_types.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> #include <math.h> void print_task_type(int id) { #pragma omp critical { int task_type; char buffer[2048]; ompt_get_task_info(0, &task_type, NULL, NULL, NULL, NULL); format_task_type(task_type, buffer); printf("%" PRIu64 ": id=%d task_type=%s=%d\n", ompt_get_thread_data()->value, id, buffer, task_type); } }; int main() { //initial task print_task_type(0); int x; //implicit task #pragma omp parallel num_threads(1) { print_task_type(1); x++; } #pragma omp parallel num_threads(2) #pragma omp master { //explicit task #pragma omp task { print_task_type(2); x++; } //explicit task with undeferred #pragma omp task if(0) { print_task_type(3); x++; } //explicit task with untied #pragma omp task untied { print_task_type(4); x++; } //explicit task with final #pragma omp task final(1) { print_task_type(5); x++; //nested explicit task with final and undeferred #pragma omp task { print_task_type(6); x++; } } //Mergeable task test deactivated for now //explicit task with mergeable /* #pragma omp task mergeable if((int)sin(0)) { print_task_type(7); x++; } */ //TODO: merged task } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=0, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]], task_type=ompt_task_initial=1, has_dependences=no // CHECK-NOT: 0: parallel_data initially not null // CHECK: {{^}}[[MASTER_ID]]: id=0 task_type=ompt_task_initial=1 // CHECK: {{^}}[[MASTER_ID]]: id=1 task_type=ompt_task_implicit|ompt_task_undeferred=134217730 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit=4, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=2 task_type=ompt_task_explicit=4 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=3 task_type=ompt_task_explicit|ompt_task_undeferred=134217732 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_untied=268435460, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=4 task_type=ompt_task_explicit|ompt_task_untied=268435460 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_final=536870916, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=5 task_type=ompt_task_explicit|ompt_task_final=536870916 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, 
has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=6 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644 return 0; }
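// Illustrative aside (not part of the libomp test above): the task_type
// integers in the CHECK lines are bitwise ORs of the ompt_task_* flags shown
// there, e.g. ompt_task_explicit|ompt_task_undeferred = 4 | 0x08000000 =
// 134217732.  A hypothetical decoder using the flag values the CHECK lines
// imply (all demo_* names are invented):
#include <stdio.h>

enum {
  demo_task_initial    = 0x00000001, // 1
  demo_task_implicit   = 0x00000002, // 2
  demo_task_explicit   = 0x00000004, // 4
  demo_task_undeferred = 0x08000000, // 134217728
  demo_task_untied     = 0x10000000, // 268435456
  demo_task_final      = 0x20000000  // 536870912
};

static void demo_print_flags(int t) {
  printf("%d =", t);
  if (t & demo_task_initial)    printf(" initial");
  if (t & demo_task_implicit)   printf(" implicit");
  if (t & demo_task_explicit)   printf(" explicit");
  if (t & demo_task_undeferred) printf(" undeferred");
  if (t & demo_task_untied)     printf(" untied");
  if (t & demo_task_final)      printf(" final");
  printf("\n");
}

int main(void) {
  demo_print_flags(134217732); // explicit|undeferred       (task 3)
  demo_print_flags(268435460); // explicit|untied           (task 4)
  demo_print_flags(536870916); // explicit|final            (task 5)
  demo_print_flags(671088644); // explicit|undeferred|final (task 6)
  return 0;
}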
convert.c
/* This file is part of ParTI!. ParTI! is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ParTI! is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ParTI!. If not, see <http://www.gnu.org/licenses/>. */ #include <ParTI.h> #include "../sptensor.h" #include "hicoo.h" /************************************************* * PRIVATE FUNCTIONS *************************************************/ /** * Compare two specified coordinates. * @param[in] item1 first tuple * @param[in] item2 second tuple * @return 1, item1 == item2; otherwise, 0. */ static int spt_EqualWithTwoCoordinates( const sptIndex * item1, const sptIndex * item2, const sptIndex nmodes) { sptIndex i1, i2; for(sptIndex m=0; m<nmodes; ++m) { i1 = item1[m]; i2 = item2[m]; if(i1 != i2) { return 0; break; } } return 1; } /** * Compute the end of this block * @param tsr a pointer to a sparse tensor * @return out_item the end indices of this block */ static int spt_BlockEnd( sptIndex * out_item, sptSparseTensor *tsr, const sptIndex * in_item, const sptElementIndex sb) { sptIndex nmodes = tsr->nmodes; for(sptIndex m=0; m<nmodes; ++m) { sptAssert(in_item[m] < tsr->ndims[m]); out_item[m] = in_item[m]+sb < tsr->ndims[m] ? in_item[m]+sb : tsr->ndims[m]; // exclusive } return 0; } /** * Locate the beginning of the block/kernel containing the coordinates * @param tsr a pointer to a sparse tensor * @return out_item the beginning indices of this block */ static int spt_LocateBeginCoord( sptIndex * out_item, sptSparseTensor *tsr, const sptIndex * in_item, const sptElementIndex bits) { sptIndex nmodes = tsr->nmodes; for(sptIndex m=0; m<nmodes; ++m) { out_item[m] = in_item[m] >> bits; } return 0; } /** * Set scheduler for kernels. 
* @param[out] kschr nmodes kernel schedulers * @param[out] nkiters the number of columns for nmodes schedulers * @param[in] kptr a vector of kernel pointers * @param[in] tsr a pointer to a sparse tensor * @param[in] sk_bits the bits of superblock size (sk) * @return mode pointers */ int spt_SetKernelScheduler( sptIndexVector **kschr, sptIndex *nkiters, sptNnzIndexVector * const kptr, sptSparseTensor *tsr, const sptElementIndex sk_bits) { sptIndex nmodes = tsr->nmodes; sptIndex * ndims = tsr->ndims; int result = 0; sptIndex * coord = (sptIndex *)malloc(nmodes * sizeof(*coord)); sptIndex * kernel_coord = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord)); for(sptNnzIndex k=0; k<kptr->len - 1; ++k) { sptNnzIndex z = kptr->data[k]; for(sptIndex m=0; m<nmodes; ++m) coord[m] = tsr->inds[m].data[z]; result = spt_LocateBeginCoord(kernel_coord, tsr, coord, sk_bits); spt_CheckError(result, "HiSpTns Convert", NULL); for(sptIndex m=0; m<nmodes; ++m) { result = sptAppendIndexVector(&(kschr[m][kernel_coord[m]]), k); spt_CheckError(result, "HiSpTns Convert", NULL); } } free(coord); free(kernel_coord); sptIndex sk = (sptIndex)pow(2, sk_bits); sptIndex tmp; for(sptIndex m=0; m<nmodes; ++m) { tmp = 0; sptIndex kernel_ndim = (ndims[m] + sk - 1) / sk; for(sptIndex i=0; i<kernel_ndim; ++i) { if(tmp < kschr[m][i].len) tmp = kschr[m][i].len; } nkiters[m] = tmp; } return 0; } /** * Pre-process COO sparse tensor by permuting, sorting, and record pointers to blocked rows. Kernels in Row-major order, blocks and elements are in Z-Morton order. * @param[out] kptr a vector of kernel pointers * @param[out] kschr nmodes kernel schedulers * @param[out] nkiters the number of columns for nmodes schedulers * @param[in] tsr a pointer to a sparse tensor * @param[in] sk_bits the bits of superblock size (sk) * @param[in] sb_bits the bits of block size (sb) * @param[in] tk the number of threads */ int spt_PreprocessSparseTensor( sptNnzIndexVector * kptr, sptIndexVector **kschr, sptIndex *nkiters, sptSparseTensor *tsr, const sptElementIndex sb_bits, const sptElementIndex sk_bits, int const tk) { sptNnzIndex nnz = tsr->nnz; int result; // TODO: possible permute modes to improve parallelism /* Sort tsr in a Row-major Block order to get all kernels. Not use Morton-order for kernels: 1. 
better support for higher-order tensors by limiting kernel size, because Morton key bit <= 128; */ sptTimer rowblock_sort_timer; sptNewTimer(&rowblock_sort_timer, 0); sptStartTimer(rowblock_sort_timer); sptSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk); // Parallelized inside sptStopTimer(rowblock_sort_timer); sptPrintElapsedTime(rowblock_sort_timer, "rowblock sorting"); sptFreeTimer(rowblock_sort_timer); #if PARTI_DEBUG == 3 printf("Sorted by sptSparseTensorSortIndexRowBlock.\n"); sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0); #endif sptTimer set_kernel_timer; sptNewTimer(&set_kernel_timer, 0); sptStartTimer(set_kernel_timer); result = sptSetKernelPointers(kptr, tsr, sk_bits); spt_CheckError(result, "HiSpTns Preprocess", NULL); result = spt_SetKernelScheduler(kschr, nkiters, kptr, tsr, sk_bits); spt_CheckError(result, "HiSpTns Preprocess", NULL); sptStopTimer(set_kernel_timer); sptPrintElapsedTime(set_kernel_timer, "Set Kernel Ptrs"); sptFreeTimer(set_kernel_timer); sptTimer morton_sort_timer; sptNewTimer(&morton_sort_timer, 0); sptStartTimer(morton_sort_timer); /* Sort blocks in each kernel in Morton-order */ sptNnzIndex k_begin, k_end; /* Loop for all kernels, 0-kptr.len for OMP code */ #pragma omp parallel for num_threads(tk) for(sptNnzIndex k=0; k<kptr->len - 1; ++k) { k_begin = kptr->data[k]; k_end = kptr->data[k+1]; // exclusive /* Sort blocks in each kernel in Morton-order */ sptSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk); #if PARTI_DEBUG == 3 printf("Kernel %"PARTI_PRI_NNZ_INDEX ": Sorted by sptSparseTensorSortIndexMorton.\n", k); sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0); #endif } sptStopTimer(morton_sort_timer); sptPrintElapsedTime(morton_sort_timer, "Morton sorting"); sptFreeTimer(morton_sort_timer); return 0; } /************************************************* * PUBLIC FUNCTIONS *************************************************/ /** * Record mode pointers for kernel rows, from a sorted tensor. 
* @param[out] kptr a vector of kernel pointers * @param[in] tsr a pointer to a sparse tensor * @param[in] sk_bits the bits of superblock size (sk) */ int sptSetKernelPointers( sptNnzIndexVector *kptr, sptSparseTensor *tsr, const sptElementIndex sk_bits) { sptIndex nmodes = tsr->nmodes; sptNnzIndex nnz = tsr->nnz; sptNnzIndex k = 0; // count kernels sptNnzIndex knnz = 0; // #Nonzeros per kernel int result = 0; result = sptAppendNnzIndexVector(kptr, 0); spt_CheckError(result, "HiSpTns Convert", NULL); sptIndex * coord = (sptIndex *)malloc(nmodes * sizeof(*coord)); sptIndex * kernel_coord = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord)); sptIndex * kernel_coord_prior = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord_prior)); /* Process first nnz to get the first kernel_coord_prior */ for(sptIndex m=0; m<nmodes; ++m) coord[m] = tsr->inds[m].data[0]; // first nonzero indices result = spt_LocateBeginCoord(kernel_coord_prior, tsr, coord, sk_bits); spt_CheckError(result, "HiSpTns Convert", NULL); for(sptNnzIndex z=0; z<nnz; ++z) { for(sptIndex m=0; m<nmodes; ++m) coord[m] = tsr->inds[m].data[z]; result = spt_LocateBeginCoord(kernel_coord, tsr, coord, sk_bits); spt_CheckError(result, "HiSpTns Convert", NULL); if(spt_EqualWithTwoCoordinates(kernel_coord, kernel_coord_prior, nmodes) == 1) { ++ knnz; } else { ++ k; result = sptAppendNnzIndexVector(kptr, knnz + kptr->data[k-1]); spt_CheckError(result, "HiSpTns Convert", NULL); for(sptIndex m=0; m<nmodes; ++m) kernel_coord_prior[m] = kernel_coord[m]; knnz = 1; } } sptAssert(k < kptr->len); sptAssert(kptr->data[kptr->len-1] + knnz == nnz); /* Set the last element for kptr */ sptAppendNnzIndexVector(kptr, nnz); free(coord); free(kernel_coord); free(kernel_coord_prior); return 0; } /** * Convert a COO tensor to a HiCOO tensor. * @param[out] hitsr the sparse tensor in HiCOO format * @param[out] max_nnzb the maximum number of nonzeros per tensor block * @param[in] tsr a pointer to a sparse tensor * @param[in] sb_bits the bits of block size (sb) * @param[in] sk_bits the bits of superblock size (sk) * @param[in] tk the number of threads */ int sptSparseTensorToHiCOO( sptSparseTensorHiCOO *hitsr, sptNnzIndex *max_nnzb, sptSparseTensor *tsr, const sptElementIndex sb_bits, const sptElementIndex sk_bits, int const tk) { const sptElementIndex sc_bits = 14; // It is kept for the future use. sptAssert(sk_bits >= sb_bits); sptAssert(sc_bits >= sb_bits); sptIndex i; int result; sptIndex nmodes = tsr->nmodes; sptNnzIndex nnz = tsr->nnz; sptElementIndex sb = pow(2, sb_bits); sptIndex sc = pow(2, sc_bits); /* Set HiCOO parameters. ndims for type conversion, size_t -> sptIndex */ sptIndex * ndims = malloc(nmodes * sizeof *ndims); spt_CheckOSError(!ndims, "HiSpTns Convert"); for(i = 0; i < nmodes; ++i) { ndims[i] = (sptIndex)tsr->ndims[i]; } result = sptNewSparseTensorHiCOO(hitsr, (sptIndex)tsr->nmodes, ndims, (sptNnzIndex)tsr->nnz, sb_bits, sk_bits, sc_bits); spt_CheckError(result, "HiSpTns Convert", NULL); /* Pre-process tensor to get hitsr->kptr, values are nonzero locations. 
*/ sptTimer sort_timer; sptNewTimer(&sort_timer, 0); sptStartTimer(sort_timer); spt_PreprocessSparseTensor(&hitsr->kptr, hitsr->kschr, hitsr->nkiters, tsr, sb_bits, sk_bits, tk); sptStopTimer(sort_timer); sptPrintElapsedTime(sort_timer, "HiCOO sorting (rowblock + morton)"); sptFreeTimer(sort_timer); #if PARTI_DEBUG >= 2 printf("Kernels: Row-major, blocks: Morton-order sorted:\n"); sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0); printf("hitsr->kptr:\n"); sptDumpNnzIndexVector(&hitsr->kptr, stdout); #endif sptTimer gen_timer; sptNewTimer(&gen_timer, 0); sptStartTimer(gen_timer); /* Temporary storage */ sptIndex * block_begin = (sptIndex *)malloc(nmodes * sizeof(*block_begin)); sptIndex * block_end = (sptIndex *)malloc(nmodes * sizeof(*block_end)); sptIndex * block_begin_prior = (sptIndex *)malloc(nmodes * sizeof(*block_begin_prior)); sptIndex * block_coord = (sptIndex *)malloc(nmodes * sizeof(*block_coord)); sptNnzIndex k_begin, k_end; // #Nonzeros locations sptNnzIndex nk = 0; // #Kernels sptNnzIndex nc = 0; // #Chunks sptNnzIndex nb = 1; // #Blocks // counting from the first nnz sptNnzIndex nb_tmp = 0; sptNnzIndex ne = 0; // #Nonzeros per block sptIndex eindex = 0; sptBlockIndex chunk_size = 0; /* different appending methods: * elements: append every nonzero entry * blocks: append when seeing a new block. * chunks: appending when seeting a new chunk. Notice the boundary of kernels and the last chunk of the whole tensor may be larger than the sc. * kernels: append when seeing a new kernel. Not appending a vector, just write data into an allocated array. */ /* Process first nnz */ for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = tsr->inds[m].data[0]; // first nonzero indices result = spt_LocateBeginCoord(block_begin_prior, tsr, block_coord, sb_bits); spt_CheckError(result, "HiSpTns Convert", NULL); for(sptIndex m=0; m<nmodes; ++m) sptAppendBlockIndexVector(&hitsr->binds[m], (sptBlockIndex)block_begin_prior[m]); sptAppendNnzIndexVector(&hitsr->bptr, 0); /* Loop for all kernels, 0 - hitsr->kptr.len - 1 for OMP code */ for(sptNnzIndex k=0; k<hitsr->kptr.len - 1; ++k) { k_begin = hitsr->kptr.data[k]; k_end = hitsr->kptr.data[k+1]; // exclusive nb_tmp = k == 0 ? 0: nb; /* Modify kptr pointing to block locations */ hitsr->kptr.data[k] = nb_tmp; ++ nk; /* Only append a chunk for the new kernel, the last chunk in the old kernel may be larger than sc */ sptAppendNnzIndexVector(&hitsr->cptr, nb_tmp); ++ nc; chunk_size = 0; /* Loop nonzeros in each kernel */ for(sptNnzIndex z = k_begin; z < k_end; ++z) { #if PARTI_DEBUG == 5 printf("z: %"PARTI_PRI_NNZ_INDEX "\n", z); #endif for(sptIndex m=0; m<nmodes; ++m) block_coord[m] = tsr->inds[m].data[z]; // first nonzero indices #if PARTI_DEBUG == 5 printf("block_coord:\n"); sptAssert(sptDumpIndexArray(block_coord, nmodes, stdout) == 0); #endif result = spt_LocateBeginCoord(block_begin, tsr, block_coord, sb_bits); spt_CheckError(result, "HiSpTns Convert", NULL); #if PARTI_DEBUG == 5 printf("block_begin_prior:\n"); sptAssert(sptDumpIndexArray(block_begin_prior, nmodes, stdout) == 0); printf("block_begin:\n"); sptAssert(sptDumpIndexArray(block_begin, nmodes, stdout) == 0); #endif result = spt_BlockEnd(block_end, tsr, block_begin, sb); // exclusive spt_CheckError(result, "HiSpTns Convert", NULL); /* Append einds and values */ for(sptIndex m=0; m<nmodes; ++m) { eindex = tsr->inds[m].data[z] < (block_begin[m] << sb_bits) ? 
tsr->inds[m].data[z] : tsr->inds[m].data[z] - (block_begin[m] << sb_bits); sptAssert(eindex < sb); sptAppendElementIndexVector(&hitsr->einds[m], (sptElementIndex)eindex); } sptAppendValueVector(&hitsr->values, tsr->values.data[z]); /* z in the same block with last z */ if (spt_EqualWithTwoCoordinates(block_begin, block_begin_prior, nmodes) == 1) { /* ne: #Elements in current block */ ++ ne; } else { /* New block */ /* ne: #Elements in the last block */ /* Append block bptr and bidx */ sptAppendNnzIndexVector(&hitsr->bptr, (sptBlockIndex)z); for(sptIndex m=0; m<nmodes; ++m) sptAppendBlockIndexVector(&hitsr->binds[m], (sptBlockIndex)block_begin[m]); for(sptIndex m=0; m<nmodes; ++m) block_begin_prior[m] = block_begin[m]; /* ne: old block's number of nonzeros */ if(chunk_size + ne >= sc) { // calculate the prior block /* Append a chunk ending by the old block */ sptAppendNnzIndexVector(&hitsr->cptr, nb); ++ nc; chunk_size = 0; } else { chunk_size += ne; } ++ nb; ne = 1; } // End new block #if PARTI_DEBUG == 5 printf("nk: %u, nc: %u, nb: %u, ne: %u, chunk_size: %lu\n\n", nk, nc, nb, ne, chunk_size); #endif } // End z loop } // End k loop sptAssert(nb <= nnz); sptAssert(nb == hitsr->binds[0].len); // sptAssert(nc <= nb); sptAssert(nk == hitsr->kptr.len - 1); /* Last element for kptr, cptr, bptr */ hitsr->kptr.data[hitsr->kptr.len - 1] = hitsr->bptr.len; sptAppendNnzIndexVector(&hitsr->cptr, hitsr->bptr.len); sptAppendNnzIndexVector(&hitsr->bptr, nnz); *max_nnzb = hitsr->bptr.data[1] - hitsr->bptr.data[0]; sptNnzIndex sum_nnzb = 0; for(sptIndex i=0; i < hitsr->bptr.len - 1; ++i) { sptNnzIndex nnzb = hitsr->bptr.data[i+1] - hitsr->bptr.data[i]; sum_nnzb += nnzb; if(*max_nnzb < nnzb) { *max_nnzb = nnzb; } } sptAssert(sum_nnzb == hitsr->nnz); sptStopTimer(gen_timer); sptPrintElapsedTime(gen_timer, "Generate HiCOO"); sptFreeTimer(gen_timer); free(block_begin); free(block_end); free(block_begin_prior); free(block_coord); return 0; }
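/*
 * Illustrative sketch (hypothetical helper, not part of the library above):
 * the HiCOO conversion splits every COO index into a coarse block coordinate
 * (via spt_LocateBeginCoord) and an in-block element offset (the eindex
 * computation in sptSparseTensorToHiCOO). With power-of-two blocks this is a
 * shift and a mask. Assumes sb_bits <= 8 so the offset fits the 8-bit
 * element index type.
 */
#include <assert.h>
#include <stdint.h>

typedef struct {
    uint32_t block;   /* coarse block coordinate: ind >> sb_bits */
    uint8_t  elem;    /* offset inside the block:  ind & (sb - 1) */
} hicoo_split_t;

static hicoo_split_t hicoo_split_index(uint32_t ind, uint8_t sb_bits)
{
    hicoo_split_t s;
    s.block = ind >> sb_bits;
    s.elem  = (uint8_t)(ind & ((1u << sb_bits) - 1u));
    assert(s.elem < (1u << sb_bits));   /* same invariant as sptAssert(eindex < sb) */
    return s;
}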
GB_binop__bxor_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_01__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__bxor_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint32) // A*D function (colscale): GB (_AxD__bxor_uint32) // D*A function (rowscale): GB (_DxB__bxor_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint32) // C=scalar+B GB (_bind1st__bxor_uint32) // C=scalar+B' GB (_bind1st_tran__bxor_uint32) // C=A+scalar GB (_bind2nd__bxor_uint32) // C=A'+scalar GB (_bind2nd_tran__bxor_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_UINT32 || GxB_NO_BXOR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bxor_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
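/*
 * Illustrative sketch (standalone, hypothetical names): once the GBB/GBX
 * macros are expanded for a non-iso matrix, the _bind2nd kernel above
 * reduces to the loop below. Ab is the optional bitmap (pass NULL for a
 * full matrix). This is a rendering of the pattern, not the generated
 * GraphBLAS code itself.
 */
#include <stdint.h>

static void bxor_uint32_bind2nd_sketch (uint32_t *Cx, const uint32_t *Ax,
    const int8_t *Ab, uint32_t y, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present in the bitmap */
        Cx [p] = Ax [p] ^ y ;                   /* cij = aij ^ y */
    }
}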
pear-flip.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include "fastq.h" #include "args.h" #include "emp.h" #include "reader.h" /* possibly implement it such that we keep a number of strings for each diff_cnt */ #define ALPHA 1 #define BETA 2 #define MAX_READ_SIZE 300 #define PHRED_INIT 33 extern long double * precomp; //int stat_test (double, double, int, double); int stat_test2 (double, double, int, double); double assemble_overlap (struct read_t * left, struct read_t * right, int base_left, int base_right, int ol_size, struct read_t * ai); /* double assemble_overlap_ef (struct reads_info * left, struct reads_info * right, int base_left, int base_right, int ol_size, struct asm_info * ai, struct emp_freq * ef); */ struct dp_matrix { int cnt_match; int cnt_error; double score; }; /* TODO: Dynamically allocate them */ double sc_eq[256][256]; double sc_neq[256][256]; double sc_eqA[256][256]; double sc_eqC[256][256]; double sc_eqG[256][256]; double sc_eqT[256][256]; double sc_neqAC[256][256]; double sc_neqAG[256][256]; double sc_neqAT[256][256]; double sc_neqCA[256][256]; double sc_neqCG[256][256]; double sc_neqCT[256][256]; double sc_neqGA[256][256]; double sc_neqGC[256][256]; double sc_neqGT[256][256]; double sc_neqTA[256][256]; double sc_neqTC[256][256]; double sc_neqTG[256][256]; double qs_mul[256][256]; int match_score = 1; int mismatch_score = 1; /** @brief Trimming of forward part of unassembled sequence * * Finds two adjacent quality scores that are smaller than the minimum quality score threshold \a min_quality. * It then \e trims the part starting from the rightmost quality score position. * * @param read * Forward sequence * * @param min_quality * Minimum quality score threshold * * @param len * Length of the forward sequence * * @return * Returns the length of the trimmed sequence */ int trim (struct read_t * read, int min_quality, int len, double * uncalled) { int i; *uncalled = 0; for (i = 0; i < len - 1; ++ i) { if (read->data[i] == 'N' || read->data[i] == 'n') ++ (*uncalled); if ( read->qscore[i] - PHRED_INIT < min_quality && read->qscore[i + 1] - PHRED_INIT < min_quality) { read->qscore[i + 1] = 0; read->data[i + 1] = 0; *uncalled = (*uncalled) / (i + 1); return i + 1; } } if (read->data[len - 1] == 'N' || read->data[len - 1] == 'n') ++ (*uncalled); *uncalled = (*uncalled) / len; return (len); } int trim_cpl (struct read_t * read, int min_quality, int len, double * uncalled) { int i, j; *uncalled = 0; for (i = len - 1; i > 0; -- i) { if (read->qscore[i] - PHRED_INIT < min_quality && read->qscore[i - 1] - PHRED_INIT < min_quality) { read->qscore[i - 1] = 0; read->data[i - 1] = 0; memmove (read->data, read->data + i, strlen (read->data + i) + 1); memmove (read->qscore, read->qscore + i, strlen (read->qscore + i) + 1); for (j = 0; read->data[j]; ++ j) if (read->data[j] == 'N' || read->data[j] == 'n') ++ (*uncalled); *uncalled = (*uncalled) / j; return (len - i); } } for (j = 0; read->data[j]; ++ j) if (read->data[j] == 'N' || read->data[j] == 'n') ++ (*uncalled); *uncalled = (*uncalled) / j; return (len); } void init_scores (int match, int mismatch, struct emp_freq * ef) { int i, j; double ex, ey; double pa2, pc2, pg2, pt2, pagt2, pcgt2, pact2, pacg2, pacg, pact, pagt, pcgt; double pac2, pag2, pat2, pcg2, pct2, pgt2; double p2acg, p2act, p2agt, p2cgt; pa2 = ef->pa * ef->pa; pc2 = ef->pc * ef->pc; pg2 = ef->pg * ef->pg; pt2 = ef->pt * ef->pt; pacg2 = pa2 + pc2 + pg2; pacg = ef->pa + ef->pc + ef->pg; p2acg = pacg * pacg; pact2 = pa2 + pc2 + pt2; pact = 
ef->pa + ef->pc + ef->pt; p2act = pact * pact; pagt2 = pa2 + pg2 + pt2; pagt = ef->pa + ef->pg + ef->pt; p2agt = pagt * pagt; pcgt2 = pc2 + pg2 + pt2; pcgt = ef->pc + ef->pg + ef->pt; p2cgt = pcgt * pcgt; pac2 = (ef->pa + ef->pc) * (ef->pa + ef->pc); pag2 = (ef->pa + ef->pg) * (ef->pa + ef->pg); pat2 = (ef->pa + ef->pt) * (ef->pa + ef->pt); pcg2 = (ef->pc + ef->pg) * (ef->pc + ef->pg); pct2 = (ef->pc + ef->pt) * (ef->pc + ef->pt); pgt2 = (ef->pg + ef->pt) * (ef->pg + ef->pt); for (i = 0; i < 256; ++ i) { for (j = 0; j < 256; ++ j) { ex = pow (10.0, - (i - PHRED_INIT) / 10.0); ey = pow (10.0, - (j - PHRED_INIT) / 10.0); sc_eq[i][j] = match * ((1 - ex) * (1 - ey) + (ex * ey) / 3.0); sc_neq[i][j] = mismatch * (1 - (1.0 / 3.0) * (1 - ex) * ey - (1.0 / 3.0) * (1 - ey) * ex - (2.0 / 9.0) * ex * ey); qs_mul[i][j] = ex * ey; sc_eqA[i][j] = match * (1 - ex) * (1 - ey) + (ex * ey) * pcgt2 / p2cgt; sc_eqC[i][j] = match * (1 - ex) * (1 - ey) + (ex * ey) * pagt2 / p2agt; sc_eqG[i][j] = match * (1 - ex) * (1 - ey) + (ex * ey) * pact2 / p2act; sc_eqT[i][j] = match * (1 - ex) * (1 - ey) + (ex * ey) * pacg2 / p2acg; sc_neqAC[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pc / pcgt) - (1 - ex) * ey * (ef->pa / pagt) - ex * ey * (pg2 + pt2) / pgt2); sc_neqCA[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pa / pagt) - (1 - ex) * ey * (ef->pc / pcgt) - ex * ey * (pg2 + pt2) / pgt2); sc_neqAG[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pg / pcgt) - (1 - ex) * ey * (ef->pa / pact) - ex * ey * (pc2 + pt2) / pct2); sc_neqGA[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pa / pact) - (1 - ex) * ey * (ef->pg / pcgt) - ex * ey * (pc2 + pt2) / pct2); sc_neqAT[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pt / pcgt) - (1 - ex) * ey * (ef->pa / pacg) - ex * ey * (pc2 + pg2) / pcg2); sc_neqTA[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pa / pacg) - (1 - ex) * ey * (ef->pt / pcgt) - ex * ey * (pc2 + pg2) / pcg2); sc_neqCG[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pg / pagt) - (1 - ex) * ey * (ef->pc / pact) - ex * ey * (pa2 + pt2) / pat2); sc_neqGC[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pc / pact) - (1 - ex) * ey * (ef->pg / pagt) - ex * ey * (pa2 + pt2) / pat2); sc_neqCT[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pt / pagt) - (1 - ex) * ey * (ef->pc / pacg) - ex * ey * (pa2 + pg2) / pag2); sc_neqTC[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pc / pacg) - (1 - ex) * ey * (ef->pt / pagt) - ex * ey * (pa2 + pg2) / pag2); sc_neqGT[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pt / pact) - (1 - ex) * ey * (ef->pg / pacg) - ex * ey * (pa2 + pc2) / pac2); sc_neqTG[i][j] = mismatch * (1- (1 - ey) * ex * (ef->pg / pacg) - (1 - ex) * ey * (ef->pt / pact) - ex * ey * (pa2 + pc2) / pac2); } } } inline void scoring_ef (char dleft, char dright, char qleft, char qright, int score_method, double * score, int match, int mismatch, struct emp_freq * ef) { if (dleft == 'N' || dright == 'N') /* one of them is N */ { switch (score_method) { case 1: *score += (ef->q * match - (1 - ef->q) * mismatch); break; case 2: *score -= (1 - ef->q) * mismatch; break; case 3: *score -= mismatch; break; } } else if (dleft == dright) /* equal */ { switch (score_method) { case 1: switch (dleft) { case 'A': *score += (sc_eqA[(int)qright][(int)qleft] - (1 - sc_eqA[(int)qright][(int)qleft] / match) * mismatch); break; case 'C': *score += (sc_eqC[(int)qright][(int)qleft] - (1 - sc_eqC[(int)qright][(int)qleft] / match) * mismatch); break; case 'G': *score += (sc_eqG[(int)qright][(int)qleft] - (1 - sc_eqG[(int)qright][(int)qleft] / match) * mismatch); break; case 'T': 
*score += (sc_eqT[(int)qright][(int)qleft] - (1 - sc_eqT[(int)qright][(int)qleft] / match) * mismatch); break; } break; case 2: switch (dleft) { case 'A': *score += sc_eqA[(int)qright][(int)qleft]; break; case 'C': *score += sc_eqC[(int)qright][(int)qleft]; break; case 'G': *score += sc_eqG[(int)qright][(int)qleft]; break; case 'T': *score += sc_eqT[(int)qright][(int)qleft]; break; } break; case 3: *score += match; break; } } else /* not equal */ { switch (score_method) { case 1: switch (dleft) { case 'A': switch (dright) { case 'C': *score = *score - (sc_neqAC[(int)qleft][(int)qright] - (1 - sc_neqAC[(int)qleft][(int)qright] / mismatch) * match); break; case 'G': *score = *score - (sc_neqAG[(int)qleft][(int)qright] - (1 - sc_neqAG[(int)qleft][(int)qright] / mismatch) * match); break; case 'T': *score = *score - (sc_neqAT[(int)qleft][(int)qright] - (1 - sc_neqAT[(int)qleft][(int)qright] / mismatch) * match); break; } break; case 'C': switch (dright) { case 'A': *score = *score - (sc_neqCA[(int)qleft][(int)qright] - (1 - sc_neqCA[(int)qleft][(int)qright] / mismatch) * match); break; case 'G': *score = *score - (sc_neqCG[(int)qleft][(int)qright] - (1 - sc_neqCG[(int)qleft][(int)qright] / mismatch) * match); break; case 'T': *score = *score - (sc_neqCT[(int)qleft][(int)qright] - (1 - sc_neqCT[(int)qleft][(int)qright] / mismatch) * match); break; } break; case 'G': switch (dright) { case 'A': *score = *score - (sc_neqGA[(int)qleft][(int)qright] - (1 - sc_neqGA[(int)qleft][(int)qright] / mismatch) * match); break; case 'C': *score = *score - (sc_neqGC[(int)qleft][(int)qright] - (1 - sc_neqGC[(int)qleft][(int)qright] / mismatch) * match); break; case 'T': *score = *score - (sc_neqGT[(int)qleft][(int)qright] - (1 - sc_neqGT[(int)qleft][(int)qright] / mismatch) * match); break; } break; case 'T': switch (dright) { case 'A': *score = *score - (sc_neqTA[(int)qleft][(int)qright] - (1 - sc_neqTA[(int)qleft][(int)qright] / mismatch) * match); break; case 'C': *score = *score - (sc_neqTC[(int)qleft][(int)qright] - (1 - sc_neqTC[(int)qleft][(int)qright] / mismatch) * match); break; case 'G': *score = *score - (sc_neqTG[(int)qleft][(int)qright] - (1 - sc_neqTG[(int)qleft][(int)qright] / mismatch) * match); break; } break; } break; case 2: switch (dleft) { case 'A': switch (dright) { case 'C': *score -= sc_neqAC[(int)qleft][(int)qright]; break; case 'G': *score -= sc_neqAG[(int)qleft][(int)qright]; break; case 'T': *score -= sc_neqAT[(int)qleft][(int)qright]; break; } break; case 'C': switch (dright) { case 'A': *score -= sc_neqCA[(int)qleft][(int)qright]; break; case 'G': *score -= sc_neqCG[(int)qleft][(int)qright]; break; case 'T': *score -= sc_neqCT[(int)qleft][(int)qright]; break; } break; case 'G': switch (dright) { case 'A': *score -= sc_neqGA[(int)qleft][(int)qright]; break; case 'C': *score -= sc_neqGC[(int)qleft][(int)qright]; break; case 'T': *score -= sc_neqGT[(int)qleft][(int)qright]; break; } break; case 'T': switch (dright) { case 'A': *score -= sc_neqTA[(int)qleft][(int)qright]; break; break; case 'C': *score -= sc_neqTC[(int)qleft][(int)qright]; break; case 'G': *score -= sc_neqTG[(int)qleft][(int)qright]; break; } break; } break; case 3: *score -= mismatch; break; } } } /* TODO: Remember to speed up this function by doing something with the multiplication and division of match/mismatch */ inline void scoring (char dleft, char dright, char qleft, char qright, int score_method, double * score, int match, int mismatch) { if (dleft == 'N' || dright == 'N') /* one of them is N */ { switch 
(score_method) { case 1: *score += (0.25 * match - (1 - 0.25) * mismatch); break; case 2: *score -= (1 - 0.25) * mismatch; break; case 3: *score -= mismatch; break; } } else if (dleft == dright) /* equal */ { switch (score_method) { case 1: *score += (sc_eq[(int)qright][(int)qleft] - (1 - sc_eq[(int)qright][(int)qleft] / match) * mismatch); break; case 2: *score += sc_eq[(int)qright][(int)qleft]; break; case 3: *score += match; break; } } else /* not equal */ { switch (score_method) { case 1: *score = *score - (sc_neq[(int)qright][(int)qleft] - (1 - sc_neq[(int)qright][(int)qleft] / mismatch) * match); break; case 2: *score -= sc_neq[(int)qright][(int)qleft]; break; case 3: *score -= mismatch; break; } } } inline int assembly_ef (struct read_t * left, struct read_t * right, int match_score, int mismatch_score, struct emp_freq * ef, struct user_args * sw) { int i,j; int n; double score; double best_score = 0; int best_overlap = 0; /* overlap of the best alignment */ int run_through = 0; int nMatch; int asm_len = 0; int st_pass; int uncalled = 0; n = strlen (left->data); /* compute score for every overlap */ score = 0; for (i = 0; i <= n; ++ i) /* the size of the overlap */ { nMatch = 0; score = 0; for (j = 0; j < i; ++ j) { scoring_ef (left->data[n - i + j], right->data[j], left->qscore[n - i + j], right->qscore[j], sw->score_method, &score, match_score, mismatch_score, ef); if (left->data[n - i + j] == right->data[j]) ++nMatch; } if (score > best_score) { best_overlap = i; best_score = score; } } /* compute for score for runthrough case */ for (i = n - 1; i > 0; --i) { score = 0; nMatch = 0; for (j = 0; j < i; ++j) { scoring_ef (left->data[j], right->data[n - i + j], left->qscore[j], right->qscore[n - i + j], sw->score_method, &score, match_score, mismatch_score, ef); if (left->data[n - i + j] == right->data[j]) ++nMatch; } if (score > best_score) { run_through = 1; best_overlap = i; best_score = score; } } if (sw->test == 1) { st_pass = stat_test2 (sw->p_value, best_score, sw->min_overlap, ef->q); } else { st_pass = stat_test2 (sw->p_value, best_score, best_overlap, ef->q); } if (!st_pass) return (0); /* do the assembly!!!! 
*/ if (!run_through) { if (best_overlap == 0) { asm_len = 2 * n; for (j = 0; j < n; ++ j) if (left->data[j] == 'N' || left->data[j] == 'n') ++uncalled; for (j = 0; j < n; ++ j) if (right->data[j] == 'N' || right->data[j] == 'n') ++uncalled; uncalled /= asm_len; if (2 * n - asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { *(left->data - 1) = 1; } else { return (0); } } else if (best_overlap == n ) { asm_len = n; for (j = 0; j < asm_len; ++ j) if ((left->data[j] == 'N' || left->data[j] == 'n') && (right->data[j] == 'N' || right->data[j] == 'n')) ++uncalled; uncalled /= asm_len; if (2 * n - asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { *(left->data - 1) = 0; assemble_overlap (left, right, 0, 0, n, left); left->data[n] = 0; left->qscore[n] = 0; } else { return (0); } } else { asm_len = 2 * n - best_overlap; for (j = 0; j < n - best_overlap; ++ j) if (left->data[j] == 'N' || left->data[j] == 'n') ++uncalled; for (j = n - best_overlap; j < n; ++ j) if ((left->data[j] == 'N' || left->data[j] == 'n') && (right->data[j - n + best_overlap] == 'N' || right->data[j - n + best_overlap] == 'n')) ++uncalled; for (j = best_overlap; j < n; ++ j) if (right->data[j] == 'N' || right->data[j] == 'n') ++uncalled; uncalled /= asm_len; if (2 * n - asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { *(left->data - 1) = 1; assemble_overlap (left, right, n - best_overlap, 0, best_overlap, left); memmove (right->data, right->data + best_overlap, n - best_overlap); memmove (right->qscore, right->qscore + best_overlap, n - best_overlap); /* THIS IS WRONG */ //memcpy (right->data, right->data + best_overlap, n - best_overlap); //memcpy (right->qscore, right->qscore + best_overlap, n - best_overlap); right->data[n - best_overlap] = 0; right->qscore[n - best_overlap] = 0; } else { return (0); } } } /* run-through case */ else { asm_len = best_overlap; /* compute uncalled */ for (j = 0; j < asm_len; ++ j) if ((left->data[j] == 'N' || left->data[j] == 'n') && (right->data[n - best_overlap + j] == 'N' || right->data[n - best_overlap + j] == 'n')) ++uncalled; uncalled /= asm_len; if (asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { assemble_overlap (left, right, 0, n - best_overlap, best_overlap, left); left->data[best_overlap] = 0; left->qscore[best_overlap] = 0; *(left->data - 1) = 0; /* flag that it's one piece */ } else { return (0); } } /* TODO: Remember to reset *(let->data - 1) */ /* TODO: Optimize this in assemble_overlap? 
*/ return (1); } inline int assembly (struct read_t * left, struct read_t * right, int match_score, int mismatch_score, struct user_args * sw) { int i,j; int n; double score; double best_score = 0; int best_overlap = 0; /* overlap of the best alignment */ int run_through = 0; int nMatch; int asm_len = 0; int st_pass; int uncalled = 0; n = strlen (left->data); /* compute score for every overlap */ score = 0; for (i = 0; i <= n; ++ i) /* the size of the overlap */ { nMatch = 0; score = 0; for (j = 0; j < i; ++ j) { scoring (left->data[n - i + j], right->data[j], left->qscore[n - i + j], right->qscore[j], sw->score_method, &score, match_score, mismatch_score); if (left->data[n - i + j] == right->data[j]) ++nMatch; } if (score > best_score) { best_overlap = i; best_score = score; } } /* compute for score for runthrough case */ for (i = n - 1; i > 0; --i) { score = 0; nMatch = 0; for (j = 0; j < i; ++j) { scoring (left->data[j], right->data[n - i + j], left->qscore[j], right->qscore[n - i + j], sw->score_method, &score, match_score, mismatch_score); if (left->data[n - i + j] == right->data[j]) ++nMatch; } if (score > best_score) { run_through = 1; best_overlap = i; best_score = score; } } if (sw->test == 1) { st_pass = stat_test2 (sw->p_value, best_score, sw->min_overlap, 0.25); } else { st_pass = stat_test2 (sw->p_value, best_score, best_overlap, 0.25); } if (!st_pass) return (0); /* do the assembly!!!! */ if (!run_through) { if (best_overlap == 0) { asm_len = 2 * n; for (j = 0; j < n; ++ j) if (left->data[j] == 'N' || left->data[j] == 'n') ++uncalled; for (j = 0; j < n; ++ j) if (right->data[j] == 'N' || right->data[j] == 'n') ++uncalled; uncalled /= asm_len; if (2 * n - asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { *(left->data - 1) = 1; } else { return (0); } } else if (best_overlap == n ) { asm_len = n; for (j = 0; j < asm_len; ++ j) if ((left->data[j] == 'N' || left->data[j] == 'n') && (right->data[j] == 'N' || right->data[j] == 'n')) ++uncalled; uncalled /= asm_len; if (2 * n - asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { *(left->data - 1) = 0; assemble_overlap (left, right, 0, 0, n, left); left->data[n] = 0; left->qscore[n] = 0; } else { return (0); } } else { asm_len = 2 * n - best_overlap; for (j = 0; j < n - best_overlap; ++ j) if (left->data[j] == 'N' || left->data[j] == 'n') ++uncalled; for (j = n - best_overlap; j < n; ++ j) if ((left->data[j] == 'N' || left->data[j] == 'n') && (right->data[j - n + best_overlap] == 'N' || right->data[j - n + best_overlap] == 'n')) ++uncalled; for (j = best_overlap; j < n; ++ j) if (right->data[j] == 'N' || right->data[j] == 'n') ++uncalled; uncalled /= asm_len; if (2 * n - asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { *(left->data - 1) = 1; assemble_overlap (left, right, n - best_overlap, 0, best_overlap, left); memmove (right->data, right->data + best_overlap, n - best_overlap); memmove (right->qscore, right->qscore + best_overlap, n - best_overlap); /* THIS IS WRONG */ //memcpy (right->data, right->data + best_overlap, n - best_overlap); //memcpy (right->qscore, right->qscore + best_overlap, n - best_overlap); right->data[n - best_overlap] = 0; right->qscore[n - best_overlap] = 0; } else { return (0); } } } /* run-through case */ else { asm_len = best_overlap; /* compute uncalled */ for (j = 0; j < asm_len; ++ j) if 
((left->data[j] == 'N' || left->data[j] == 'n') && (right->data[n - best_overlap + j] == 'N' || right->data[n - best_overlap + j] == 'n')) ++uncalled; uncalled /= asm_len; if (asm_len >= sw->min_overlap && asm_len >= sw->min_asm_len && asm_len <= sw->max_asm_len && uncalled <= sw->max_uncalled) { assemble_overlap (left, right, 0, n - best_overlap, best_overlap, left); left->data[best_overlap] = 0; left->qscore[best_overlap] = 0; *(left->data - 1) = 0; /* flag that it's one piece */ } else { return (0); } } /* TODO: Remember to reset *(let->data - 1) */ /* TODO: Optimize this in assemble_overlap? */ return (1); } double assemble_overlap (struct read_t * left, struct read_t * right, int base_left, int base_right, int ol_size, struct read_t * ai) { int i; char x, y; char qx, qy; //double exp_match = 0; for (i = 0; i < ol_size; ++i) { x = left->data[base_left + i]; y = right->data[base_right + i]; qx = left->qscore[base_left + i]; qy = right->qscore[base_right + i]; if ( (x == 'N' || x == 'n') && (y == 'N' || y == 'n')) { //exp_match += 0.25; sm_len ai->data[base_left + i] = 'N'; ai->qscore[base_left + i] = ( qx < qy ) ? qx : qy; } else if (x == 'N' || x == 'n') { //exp_match += 0.25; ai->data[base_left + i] = y; ai->qscore[base_left + i] = qy; } else if (y == 'N' || y == 'n') { //exp_match += 0.25; ai->data[base_left + i] = x; ai->qscore[base_left + i] = qx; } else { if (x == y) { //exp_match += (sc_eq[(int)qx][(int)qy] / match_score); ai->data[base_left + i] = x; ai->qscore[base_left + i] = (right->qscore[base_right + i] - PHRED_INIT) + (left->qscore[base_left + i] - PHRED_INIT) + PHRED_INIT; //qs_mul[qx][qy]; } else { //exp_match += (1 - sc_neq[(int)qx][(int)qy] / mismatch_score); if (qx > qy) { ai->data[base_left + i] = x; ai->qscore[base_left + i] = qx; } else { ai->data[base_left + i] = y; ai->qscore[base_left + i] = qy; } } } } //if (ol_size == 0) return (0); //return (exp_match / (double)ol_size); return (0); } /* double assemble_overlap_ef (struct reads_info * left, struct reads_info * right, int base_left, int base_right, int ol_size, struct asm_info * ai, struct emp_freq * ef) { int i; char x, y; char qx, qy; double exp_match = 0; for (i = 0; i < ol_size; ++i) { x = left->data[base_left + i]; y = right->data[base_right + i]; qx = left->qscore[base_left + i]; qy = right->qscore[base_right + i]; if ( (x == 'N' || x == 'n') && (y == 'N' || y == 'n')) { exp_match += ef->q; ai->data[base_left + i] = 'N'; ai->qscore[base_left + i] = ( qx < qy ) ? 
qx : qy; } else if (x == 'N' || x == 'n') { exp_match += ef->q; ai->data[base_left + i] = y; ai->qscore[base_left + i] = qy; } else if (y == 'N' || y == 'n') { exp_match += ef->q; ai->data[base_left + i] = x; ai->qscore[base_left + i] = qx; } else { if (x == y) { //exp_match += (sc_eq[(int)qx][(int)qy] / match_score); switch (x) { case 'A': exp_match += (sc_eqA[(int)qy][(int)qx] / match_score); break; case 'C': exp_match += (sc_eqC[(int)qy][(int)qx] / match_score); break; case 'G': exp_match += (sc_eqG[(int)qy][(int)qx] / match_score); break; case 'T': exp_match += (sc_eqT[(int)qy][(int)qx] / match_score); break; } ai->data[base_left + i] = x; ai->qscore[base_left + i] = (right->qscore[base_right + i] - PHRED_INIT) + (left->qscore[base_left + i] - PHRED_INIT) + PHRED_INIT; //qs_mul[qx][qy]; } else { //exp_match += (1 - sc_neq[(int)qx][(int)qy] / mismatch_score); switch (x) { case 'A': switch (y) { case 'C': exp_match += (1 - sc_neqAC[(int)qx][(int)qy]/mismatch_score); break; case 'G': exp_match += (1 - sc_neqAG[(int)qx][(int)qy]/mismatch_score); break; case 'T': exp_match += (1 - sc_neqAT[(int)qx][(int)qy]/mismatch_score); break; } break; case 'C': switch (y) { case 'A': exp_match += (1 - sc_neqCA[(int)qx][(int)qy]/mismatch_score); break; case 'G': exp_match += (1 - sc_neqCG[(int)qx][(int)qy]/mismatch_score); break; case 'T': exp_match += (1 - sc_neqCT[(int)qx][(int)qy]/mismatch_score); break; } break; case 'G': switch (y) { case 'A': exp_match += (1 - sc_neqGA[(int)qx][(int)qy]/mismatch_score); break; case 'C': exp_match += (1 - sc_neqGC[(int)qx][(int)qy]/mismatch_score); break; case 'T': exp_match += (1 - sc_neqGT[(int)qx][(int)qy]/mismatch_score); break; } break; case 'T': switch (y) { case 'A': exp_match += (1 - sc_neqTA[(int)qx][(int)qy]/mismatch_score); break; case 'C': exp_match += (1 - sc_neqTC[(int)qx][(int)qy]/mismatch_score); break; case 'G': exp_match += (1 - sc_neqTG[(int)qx][(int)qy]/mismatch_score); break; } break; } if (qx > qy) { ai->data[base_left + i] = x; ai->qscore[base_left + i] = qx; } else { ai->data[base_left + i] = y; ai->qscore[base_left + i] = qy; } } } } if (ol_size == 0) return (0); return (exp_match / (double)ol_size); } */ void mstrrev (char * s) { char * q = s; while (q && *q) ++q; for (--q; s < q; ++s, --q) { *s = *s ^ *q; *q = *s ^ *q; *s = *s ^ *q; } } void mstrcpl (char * s) { if (!s) return; while (*s) { switch (*s) { case 'A': case 'a': *s = 'T'; break; case 'C': case 'c': *s = 'G'; break; case 'G': case 'g': *s = 'C'; break; case 'T': case 't': *s = 'A'; break; } ++s; } } int validate_input (int nleft, int nright) { if (!nleft || !nright) { fprintf (stderr, "ERROR: At least one of the input files contains no records.\n"); return (0); } if (nleft != nright) { fprintf (stderr, "ERROR: Number of records in the two input files does not match.\n"); return (0); } return (1); } char * makefilename (const char * prefix, const char * suffix) { char * filename; filename = (char *) malloc ((strlen(prefix) + strlen(suffix) + 1) * sizeof (char)); strcpy (filename, prefix); strcat (filename, suffix); return (filename); } int main (int argc, char * argv[]) { int i, trim_len_fw, trim_len_rev; //struct reads_info ** ri_left; //struct reads_info ** ri_right; // int cnt_reads_left; // int cnt_reads_right; int read_size; char * out[4]; FILE * fd[4]; int ass; double uncalled_forward, uncalled_reverse; struct user_args sw; struct emp_freq * ef; struct block_t fwd_block; struct block_t rev_block; struct block_t dbfwd_block; struct block_t dbrev_block; char two_piece; int 
elms; int flip = 0; struct read_t ** fwd; struct read_t ** rev; int tmp_elms = 0; int end = 0; if (!decode_switches (argc, argv, &sw)) { /* TODO: Validate reads names */ usage (); return (EXIT_FAILURE); } ef = (struct emp_freq *)malloc (sizeof(struct emp_freq)); ef->freqa = ef->freqc = ef->freqg = ef->freqt = ef->total = ef->pa = ef->pc = ef->pg = ef->pt = ef->q = 0.25; init_scores(match_score, mismatch_score, ef); /* read the two fastq files containing the left-end and right-end reads */ //ri_left = read_fastq(sw.fastq_left, &cnt_reads_left); //ri_right = read_fastq(sw.fastq_right, &cnt_reads_right); //if (!validate_input (cnt_reads_left, cnt_reads_right)) // { // return (EXIT_FAILURE); // } // read_size = strlen (ri_left[0]->data); /* TODO: THIS IS WRONG!!!! TO GET EMPIRICAL FREQUENCIES WE NEED TO READ THE WHOLE FILE :-( */ //ef = get_emp_freq (cnt_reads_right, read_size, fwd_block.reads, rev_block.reads); /* reverse the right ends */ /* allocate memory for the assembled results */ /* ai = (struct asm_info *) malloc (cnt_reads_left * sizeof(struct asm_info)); for (i = 0; i < cnt_reads_left; ++i) { ai[i].data = (char *) malloc ((2 * read_size + 1) * sizeof(char)); ai[i].quality_score = (char *) malloc ((2 * read_size + 1) * sizeof(char)); } */ init_fastq_reader_double_buffer (sw.fastq_left, sw.fastq_right, sw.memory, &fwd_block, &rev_block, &dbfwd_block, &dbrev_block); //init_fastq_reader (sw.fastq_left, sw.fastq_right, 100000000, &fwd_block, &rev_block); /* construct output file names */ out[0] = makefilename (sw.outfile, ".assembled.fastq"); out[1] = makefilename (sw.outfile, ".unassembled.forward.fastq"); out[2] = makefilename (sw.outfile, ".unassembled.reverse.fastq"); out[3] = makefilename (sw.outfile, ".discarded.fastq"); fd[0] = fopen (out[0], "w"); fd[1] = fopen (out[1], "w"); fd[2] = fopen (out[2], "w"); fd[3] = fopen (out[3], "w"); omp_set_num_threads (sw.threads); while (1) { if (flip == 0) { if (end == 1) break; tmp_elms = elms = get_next_reads (&fwd_block, &rev_block); if (!elms) break; read_size = strlen (fwd_block.reads[0]->data); flip = 1; } if (flip == 1) { // printf ("Flipping!\n"); fwd = fwd_block.reads; rev = rev_block.reads; elms = tmp_elms; } else { // printf ("Flipping!\n"); fwd = dbfwd_block.reads; rev = dbrev_block.reads; elms = tmp_elms; } //#pragma omp parallel shared(fwd_block.reads, rev_block.reads, ai) private(i, ass, uncalled, kassian_result) #pragma omp parallel private(i, ass, uncalled_forward, uncalled_reverse) { #pragma omp master { if (flip == 1) { tmp_elms = db_get_next_reads (&dbfwd_block, &dbrev_block, &fwd_block, &rev_block); flip = 2; } else { tmp_elms = db_get_next_reads (&fwd_block, &rev_block, &dbfwd_block, &dbrev_block); flip = 1; } if (!tmp_elms) {flip = 0; end = 1;} } /* do the memory reading here */ #pragma omp for schedule (runtime) for (i = 0; i < elms; ++ i) { mstrrev (rev[i]->data); mstrcpl (rev[i]->data); mstrrev (rev[i]->qscore); if (sw.emp_freqs == 0) { ass = assembly (fwd[i], rev[i], match_score, mismatch_score, &sw); *(fwd[i]->qscore - 1) = ass; if (!ass) if (trim (fwd[i], sw.qual_thres, read_size, &uncalled_forward) < sw.min_trim_len || trim_cpl (rev[i], sw.qual_thres, read_size, &uncalled_reverse) < sw.min_trim_len || uncalled_forward >= sw.max_uncalled || uncalled_reverse >= sw.max_uncalled) { *(fwd[i]->qscore - 1) = 2; } else { *(fwd[i]->qscore - 1) = 3; } } else { ass = assembly_ef (fwd[i], rev[i], match_score, mismatch_score, ef, &sw); *(fwd[i]->qscore - 1) = ass; //TODO: Probably?? 
            }
        }
     } /* end of the parallel region */

     for ( i = 0; i < elms; ++ i)
      {
        two_piece = *(fwd[i]->data - 1);
        *(fwd[i]->data - 1) = 0;

        if (*(fwd[i]->qscore - 1) == 1)        /* assembled */
         {
           *(fwd[i]->qscore - 1) = 0;
           fprintf (fd[0], "%s\n", fwd[i]->header);
           if (!two_piece)
            {
              fprintf (fd[0], "%s\n", fwd[i]->data);
            }
           else
            {
              fprintf (fd[0], "%s",   fwd[i]->data);
              fprintf (fd[0], "%s\n", rev[i]->data);
            }
           fprintf (fd[0], "+\n");
           if (!two_piece)
            {
              fprintf (fd[0], "%s\n", fwd[i]->qscore);
            }
           else
            {
              fprintf (fd[0], "%s",   fwd[i]->qscore);
              fprintf (fd[0], "%s\n", rev[i]->qscore);
            }
         }
        else if (*(fwd[i]->qscore - 1) == 2)   /* discarded */
         {
           *(fwd[i]->qscore - 1) = 0;
           /* discarded reads */
           /* Maybe consider printing the untrimmed sequences */
           fprintf (fd[3], "%s\n", fwd[i]->header);
           fprintf (fd[3], "%s\n+\n%s\n", fwd[i]->data, fwd[i]->qscore);
           fprintf (fd[3], "%s\n", rev[i]->header);
           fprintf (fd[3], "%s\n+\n%s\n", rev[i]->data, rev[i]->qscore);   /* printing the reverse complement of the original sequence */
         }
        else                                   /* unassembled reads */
         {
           *(fwd[i]->qscore - 1) = 0;
           fprintf (fd[1], "%s\n", fwd[i]->header);
           fprintf (fd[2], "%s\n", rev[i]->header);
           fprintf (fd[1], "%s\n+\n%s\n", fwd[i]->data, fwd[i]->qscore);
           fprintf (fd[2], "%s\n+\n%s\n", rev[i]->data, rev[i]->qscore);   /* printing the reverse complement of the original sequence */
         }
      }
   } /* end of while (1) */

  free (ef);
  free (out[0]);
  free (out[1]);
  free (out[2]);
  free (out[3]);

  destroy_reader ();

  /* TODO: Fix those file closings */
  fclose (fd[0]);
  fclose (fd[1]);
  fclose (fd[2]);
  fclose (fd[3]);

  return (0);
}
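/*
 * Illustrative sketch (hypothetical helper): the per-base consensus rule
 * applied by assemble_overlap() inside the overlap region, restated in
 * isolation. Agreeing bases keep the base and sum their PHRED+33 scores
 * (uncapped, exactly as the code above does); disagreeing bases keep the
 * higher-quality base and its score; an N is dominated by any called base.
 */
#include <ctype.h>

#define SKETCH_PHRED_INIT 33   /* PHRED+33 offset, mirrors PHRED_INIT above */

static void consensus_base (char x, char qx, char y, char qy,
                            char * base_out, char * qscore_out)
{
  int xn = (toupper ((unsigned char)x) == 'N');
  int yn = (toupper ((unsigned char)y) == 'N');

  if (xn && yn)
   {
     *base_out = 'N';
     *qscore_out = (qx < qy) ? qx : qy;   /* keep the lower of the two scores */
   }
  else if (xn)
   {
     *base_out = y;  *qscore_out = qy;    /* a called base dominates an N */
   }
  else if (yn)
   {
     *base_out = x;  *qscore_out = qx;
   }
  else if (x == y)
   {
     *base_out = x;                       /* match: the two scores add up */
     *qscore_out = (char)((qx - SKETCH_PHRED_INIT) + (qy - SKETCH_PHRED_INIT) + SKETCH_PHRED_INIT);
   }
  else if (qx > qy)
   {
     *base_out = x;  *qscore_out = qx;    /* mismatch: trust the higher quality */
   }
  else
   {
     *base_out = y;  *qscore_out = qy;
   }
}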
TimeCluster.h
/****************************************************************************** ** Copyright (c) 2015, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ /** * @file * This file is part of SeisSol. * * @author Alex Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer) * * @section LICENSE * Copyright (c) 2013-2015, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @section DESCRIPTION * LTS cluster in SeisSol. **/ #ifndef TIMECLUSTER_H_ #define TIMECLUSTER_H_ #ifdef USE_MPI #include <mpi.h> #include <list> #endif #include <Initializer/typedefs.hpp> #include <SourceTerm/typedefs.hpp> #include <utils/logger.h> #include <Initializer/LTS.h> #include <Initializer/tree/LTSTree.hpp> #include <Kernels/Time.h> #include <Kernels/Local.h> #include <Kernels/Neighbor.h> #include <Kernels/DynamicRupture.h> #include <Kernels/Plasticity.h> #include <Solver/FreeSurfaceIntegrator.h> #include <Monitoring/LoopStatistics.h> #include <Kernels/TimeCommon.h> #ifdef ACL_DEVICE #include <device.h> #include <Solver/Pipeline/DrPipeline.h> #endif namespace seissol { namespace time_stepping { class TimeCluster; } namespace kernels { class ReceiverCluster; } } /** * Time cluster, which represents a collection of elements having the same time step width. **/ class seissol::time_stepping::TimeCluster { public: //! cluster id on this rank const unsigned int m_clusterId; //! global cluster cluster id const unsigned int m_globalClusterId; private: bool usePlasticity; //! number of time steps unsigned long m_numberOfTimeSteps; /* * integrators */ //! time kernel kernels::Time m_timeKernel; //! local kernel kernels::Local m_localKernel; //! neighbor kernel kernels::Neighbor m_neighborKernel; kernels::DynamicRupture m_dynamicRuptureKernel; /* * mesh structure */ struct MeshStructure *m_meshStructure; /* * global data */ //! global data structures GlobalData *m_globalDataOnHost{nullptr}; GlobalData *m_globalDataOnDevice{nullptr}; #ifdef ACL_DEVICE device::DeviceInstance& device = device::DeviceInstance::getInstance(); dr::pipeline::DrPipeline drPipeline; #endif /* * element data and mpi queues */ #ifdef USE_MPI //! pending copy region sends std::list< MPI_Request* > m_sendQueue; //! pending ghost region receives std::list< MPI_Request* > m_receiveQueue; #endif seissol::initializers::TimeCluster* m_clusterData; seissol::initializers::TimeCluster* m_dynRupClusterData; seissol::initializers::LTS* m_lts; seissol::initializers::DynamicRupture* m_dynRup; //! time step width of the performed time step. double m_timeStepWidth; //! Mapping of cells to point sources sourceterm::CellToPointSourcesMapping const* m_cellToPointSources; //! Number of mapping of cells to point sources unsigned m_numberOfCellToPointSourcesMappings; //! Point sources sourceterm::PointSources const* m_pointSources; //! true if dynamic rupture faces are present bool m_dynamicRuptureFaces; enum ComputePart { LocalInterior = 0, NeighborInterior, DRNeighborInterior, #ifdef USE_MPI LocalCopy, NeighborCopy, DRNeighborCopy, #endif DRFrictionLawCopy, DRFrictionLawInterior, PlasticityCheck, PlasticityYield, NUM_COMPUTE_PARTS }; long long m_flops_nonZero[NUM_COMPUTE_PARTS]; long long m_flops_hardware[NUM_COMPUTE_PARTS]; //! Tv parameter for plasticity double m_tv; //! Relax time for plasticity double m_oneMinusIntegratingFactor; //! 
Stopwatch of TimeManager
    LoopStatistics* m_loopStatistics;
    unsigned        m_regionComputeLocalIntegration;
    unsigned        m_regionComputeNeighboringIntegration;
    unsigned        m_regionComputeDynamicRupture;

    kernels::ReceiverCluster* m_receiverCluster;

#ifdef USE_MPI
    /**
     * Receives the copy layer data from relevant neighboring MPI clusters.
     **/
    void receiveGhostLayer();

    /**
     * Sends the associated regions of the copy layer to relevant neighboring MPI clusters.
     **/
    void sendCopyLayer();

#if defined(_OPENMP) && defined(USE_COMM_THREAD)
    /**
     * Initiates the receives of the copy layer data from relevant neighboring MPI clusters; active when using a communication thread.
     **/
    void initReceiveGhostLayer();

    /**
     * Initiates the sends of the associated regions of the copy layer to relevant neighboring MPI clusters; active when using a communication thread.
     **/
    void initSendCopyLayer();

    /**
     * Waits until the initialization of the communication is finished.
     **/
    void waitForInits();
#endif

    /**
     * Tests for pending ghost layer communication.
     **/
    bool testForGhostLayerReceives();

    /**
     * Tests for pending copy layer communication.
     **/
    bool testForCopyLayerSends();
#endif

    /**
     * Writes the receiver output if applicable (receivers present, receivers have to be written).
     **/
    void writeReceivers();

    /**
     * Computes the source terms if applicable.
     **/
    void computeSources();

    /**
     * Computes dynamic rupture.
     **/
    void computeDynamicRupture( seissol::initializers::Layer& layerData );

    /**
     * Computes all cell local integration.
     *
     * These are:
     *  * time integration
     *  * volume integration
     *  * local boundary integration
     *
     * Remark: After this step the DOFs are only half-updated; the boundary
     *         contribution of the neighboring cells is still missing.
     *
     * @param i_numberOfCells number of cells.
     * @param i_cellInformation cell local information.
     * @param i_cellData cell data.
     * @param io_buffers time integration buffers.
     * @param io_derivatives time derivatives.
     * @param io_dofs degrees of freedom.
     **/
    void computeLocalIntegration( seissol::initializers::Layer& i_layerData );

    /**
     * Computes the contribution of the neighboring cells to the boundary integral.
     *
     * Remark: After this step (in combination with the local integration) the DOFs are at the next time step.
     * TODO: This excludes the dynamic rupture contribution.
     *
     * @param i_numberOfCells number of cells.
     * @param i_cellInformation cell local information.
     * @param i_cellData cell data.
     * @param i_faceNeighbors pointers to neighboring time buffers or derivatives.
     * @param io_dofs degrees of freedom.
**/ void computeNeighboringIntegration( seissol::initializers::Layer& i_layerData ); #ifndef ACL_DEVICE template<bool usePlasticity> std::pair<long, long> computeNeighboringIntegrationImplementation(seissol::initializers::Layer& i_layerData) { SCOREP_USER_REGION( "computeNeighboringIntegration", SCOREP_USER_REGION_TYPE_FUNCTION ) m_loopStatistics->begin(m_regionComputeNeighboringIntegration); real* (*faceNeighbors)[4] = i_layerData.var(m_lts->faceNeighbors); CellDRMapping (*drMapping)[4] = i_layerData.var(m_lts->drMapping); CellLocalInformation* cellInformation = i_layerData.var(m_lts->cellInformation); PlasticityData* plasticity = i_layerData.var(m_lts->plasticity); real (*pstrain)[7 * NUMBER_OF_ALIGNED_BASIS_FUNCTIONS] = i_layerData.var(m_lts->pstrain); unsigned numberOTetsWithPlasticYielding = 0; kernels::NeighborData::Loader loader; loader.load(*m_lts, i_layerData); real *l_timeIntegrated[4]; real *l_faceNeighbors_prefetch[4]; #ifdef _OPENMP #pragma omp parallel for schedule(static) default(none) private(l_timeIntegrated, l_faceNeighbors_prefetch) shared(cellInformation, loader, faceNeighbors, pstrain, i_layerData, plasticity, drMapping) reduction(+:numberOTetsWithPlasticYielding) #endif for( unsigned int l_cell = 0; l_cell < i_layerData.getNumberOfCells(); l_cell++ ) { auto data = loader.entry(l_cell); seissol::kernels::TimeCommon::computeIntegrals(m_timeKernel, data.cellInformation.ltsSetup, data.cellInformation.faceTypes, m_subTimeStart, m_timeStepWidth, faceNeighbors[l_cell], #ifdef _OPENMP *reinterpret_cast<real (*)[4][tensor::I::size()]>(&(m_globalDataOnHost->integrationBufferLTS[omp_get_thread_num()*4*tensor::I::size()])), #else *reinterpret_cast<real (*)[4][tensor::I::size()]>(m_globalData->integrationBufferLTS), #endif l_timeIntegrated); #ifdef ENABLE_MATRIX_PREFETCH #pragma message("the current prefetch structure (flux matrices and tDOFs is tuned for higher order and shouldn't be harmful for lower orders") l_faceNeighbors_prefetch[0] = (cellInformation[l_cell].faceTypes[1] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][1] : drMapping[l_cell][1].godunov; l_faceNeighbors_prefetch[1] = (cellInformation[l_cell].faceTypes[2] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][2] : drMapping[l_cell][2].godunov; l_faceNeighbors_prefetch[2] = (cellInformation[l_cell].faceTypes[3] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][3] : drMapping[l_cell][3].godunov; // fourth face's prefetches if (l_cell < (i_layerData.getNumberOfCells()-1) ) { l_faceNeighbors_prefetch[3] = (cellInformation[l_cell+1].faceTypes[0] != FaceType::dynamicRupture) ? 
faceNeighbors[l_cell+1][0] : drMapping[l_cell+1][0].godunov; } else { l_faceNeighbors_prefetch[3] = faceNeighbors[l_cell][3]; } #endif m_neighborKernel.computeNeighborsIntegral( data, drMapping[l_cell], #ifdef ENABLE_MATRIX_PREFETCH l_timeIntegrated, l_faceNeighbors_prefetch #else l_timeIntegrated #endif ); if constexpr (usePlasticity) { numberOTetsWithPlasticYielding += seissol::kernels::Plasticity::computePlasticity( m_oneMinusIntegratingFactor, m_timeStepWidth, m_tv, m_globalDataOnHost, &plasticity[l_cell], data.dofs, pstrain[l_cell] ); } #ifdef INTEGRATE_QUANTITIES seissol::SeisSol::main.postProcessor().integrateQuantities( m_timeStepWidth, i_layerData, l_cell, dofs[l_cell] ); #endif // INTEGRATE_QUANTITIES } const long long nonZeroFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_nonZero[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_nonZero[PlasticityYield]; const long long hardwareFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_hardware[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_hardware[PlasticityYield]; m_loopStatistics->end(m_regionComputeNeighboringIntegration, i_layerData.getNumberOfCells()); return {nonZeroFlopsPlasticity, hardwareFlopsPlasticity}; } #endif // ACL_DEVICE void computeLocalIntegrationFlops(unsigned numberOfCells, CellLocalInformation const* cellInformation, long long& nonZeroFlops, long long& hardwareFlops); void computeNeighborIntegrationFlops( unsigned numberOfCells, CellLocalInformation const* cellInformation, CellDRMapping const (*drMapping)[4], long long& nonZeroFlops, long long& hardwareFlops, long long& drNonZeroFlops, long long& drHardwareFlops ); void computeDynamicRuptureFlops( seissol::initializers::Layer& layerData, long long& nonZeroFlops, long long& hardwareFlops ); void computeFlops(); //! Update relax time for plasticity void updateRelaxTime() { m_oneMinusIntegratingFactor = (m_tv > 0.0) ? 1.0 - exp(-m_timeStepWidth / m_tv) : 1.0; } public: //! flags identifiying if the respective part is allowed to be updated struct { bool localCopy; bool neighboringCopy; bool localInterior; bool neighboringInterior; } m_updatable; #ifdef USE_MPI //! send true LTS buffers volatile bool m_sendLtsBuffers; #endif //! reset lts buffers before performing time predictions volatile bool m_resetLtsBuffers; /* Sub start time of width respect to the next cluster; use 0 if not relevant, for example in GTS. * LTS requires to evaluate a partial time integration of the derivatives. The point zero in time refers to the derivation of the surrounding time derivatives, which * coincides with the last completed time step of the next cluster. The start/end of the time step is the start/end of this clusters time step relative to the zero point. * Example: * <verb> * 5 dt * |-----------------------------------------------------------------------------------------| <<< Time stepping of the next cluster (Cn) (5x larger than the current). * | | | | | | * |*****************|*****************|+++++++++++++++++| | | <<< Status of the current cluster. * | | | | | | * |-----------------|-----------------|-----------------|-----------------|-----------------| <<< Time stepping of the current cluster (Cc). * 0 dt 2dt 3dt 4dt 5dt * </verb> * * In the example above two clusters are illustrated: Cc and Cn. Cc is the current cluster under consideration and Cn the next cluster with respect to LTS terminology. * Cn is currently at time 0 and provided Cc with derivatives valid until 5dt. 
Cc updated already twice and did its last full update to reach 2dt (== subTimeStart). Next * computeNeighboringCopy is called to accomplish the next full update to reach 3dt (+++). Besides working on the buffers of own buffers and those of previous clusters, * Cc needs to evaluate the time prediction of Cn in the interval [2dt, 3dt]. */ double m_subTimeStart; //! number of full updates the cluster has performed since the last synchronization unsigned int m_numberOfFullUpdates; //! simulation time of the last full update (this is a complete volume and boundary integration) double m_fullUpdateTime; //! final time of the prediction (derivatives and time integrated DOFs). double m_predictionTime; //! time of the next receiver output double m_receiverTime; /** * Constructs a new LTS cluster. * * @param i_clusterId id of this cluster with respect to the current rank. * @param i_globalClusterId global id of this cluster. * @param usePlasticity true if using plasticity * @param i_timeKernel time integration kernel. * @param i_volumeKernel volume integration kernel. * @param i_boundaryKernel boundary integration kernel. * @param i_meshStructure mesh structure of this cluster. * @param i_copyCellInformation cell information in the copy layer. * @param i_interiorCellInformation cell information in the interior. * @param i_globalData global data. * @param i_copyCellData cell data in the copy layer. * @param i_interiorCellData cell data in the interior. * @param i_cells degrees of freedom, time buffers, time derivatives. **/ TimeCluster(unsigned int i_clusterId, unsigned int i_globalClusterId, bool usePlasticity, MeshStructure *i_meshStructure, CompoundGlobalData i_globalData, seissol::initializers::TimeCluster* i_clusterData, seissol::initializers::TimeCluster* i_dynRupClusterData, seissol::initializers::LTS* i_lts, seissol::initializers::DynamicRupture* i_dynRup, LoopStatistics* i_loopStatistics); /** * Destructor of a LTS cluster. * TODO: Currently prints only statistics in debug mode. **/ ~TimeCluster(); double timeStepWidth() const { return m_timeStepWidth; } void setTimeStepWidth(double timestep) { m_timeStepWidth = timestep; updateRelaxTime(); m_dynamicRuptureKernel.setTimeStepWidth(timestep); } /** * Adds a source to the cluster. * * @param i_meshId mesh id of the point of interest. **/ void addSource( unsigned int i_meshId ); /** * Sets the pointer to the cluster's point sources * * @param i_cellToPointSources Contains mappings of 1 cell offset to m point sources * @param i_numberOfCellToPointSourcesMappings Size of i_cellToPointSources * @param i_pointSources pointer to all point sources used on this cluster */ void setPointSources( sourceterm::CellToPointSourcesMapping const* i_cellToPointSources, unsigned i_numberOfCellToPointSourcesMappings, sourceterm::PointSources const* i_pointSources ); void setReceiverCluster( kernels::ReceiverCluster* receiverCluster) { m_receiverCluster = receiverCluster; } /** * Set Tv constant for plasticity. */ void setTv(double tv) { m_tv = tv; updateRelaxTime(); } #ifdef USE_MPI /** * Computes cell local integration of all cells in the copy layer and initiates the corresponding communication. * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request. * * Cell local integration is: * * time integration * * volume integration * * local boundary integration * * @return true if the update (incl. 
communication requests), false if the update failed due to unfinshed sends of copy data to MPI neighbors. **/ bool computeLocalCopy(); #endif /** * Computes cell local integration of all cells in the interior. * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request. * * Cell local integration is: * * time integration * * volume integration * * local boundary integration **/ void computeLocalInterior(); #ifdef USE_MPI /** * Computes the neighboring contribution to the boundary integral for all cells in the copy layer. * * @return true if the update (incl. communication requests), false if the update failed due to missing data from neighboring ranks. **/ bool computeNeighboringCopy(); #endif /** * Computes the neighboring contribution to the boundary integral for all cells in the interior. **/ void computeNeighboringInterior(); /** * Returns number of cells managed by this cluster. * @return Number of cells */ long getNumberOfCells() const; #if defined(_OPENMP) && defined(USE_MPI) && defined(USE_COMM_THREAD) /** * Tests for pending ghost layer communication, active when using communication thread **/ void pollForGhostLayerReceives(); /** * Polls for pending copy layer communication, active when using communication thread **/ void pollForCopyLayerSends(); /** * Start Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread **/ void startReceiveGhostLayer(); /** * start Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread **/ void startSendCopyLayer(); #endif }; #endif
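// --- Editor's sketch (hedged, not SeisSol code) -----------------------------
// The m_subTimeStart comment above relies on a partial time integration of the
// next cluster's Taylor-expanded derivatives over [subTimeStart,
// subTimeStart + dt]. A minimal scalar sketch of that integral follows; the
// function name and the use of a scalar DOF instead of SeisSol's tensor
// kernels are illustrative assumptions.
#include <cstddef>
#include <vector>

// derivatives[k] holds the k-th time derivative at the expansion point t = 0.
// The integral of the Taylor series over [t0, t1] is
//   sum_k derivatives[k] * (t1^(k+1) - t0^(k+1)) / (k+1)!.
inline double integrateTaylorSeries(const std::vector<double>& derivatives,
                                    double t0, double t1) {
  double integral = 0.0;
  double powT0 = t0, powT1 = t1; // t^(k+1) terms, starting at k = 0
  double factorial = 1.0;        // running (k+1)!
  for (std::size_t k = 0; k < derivatives.size(); ++k) {
    factorial *= static_cast<double>(k + 1);
    integral += derivatives[k] * (powT1 - powT0) / factorial;
    powT0 *= t0;
    powT1 *= t1;
  }
  return integral; // time-integrated DOF over [t0, t1]
}
// -----------------------------------------------------------------------------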
syr2k.limlam.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */
/* syr2k.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "syr2k.h"
/* Array initialization. */
static void init_array(int n, int m, DATA_TYPE *alpha, DATA_TYPE *beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m)) { int i, j; *alpha = 1.5; *beta = 1.2; for (i = 0; i < n; i++) for (j = 0; j < m; j++) { A[i][j] = (DATA_TYPE) ((i*j+1)%n) / n; B[i][j] = (DATA_TYPE) ((i*j+2)%m) / m; } for (i = 0; i < n; i++) for (j = 0; j < n; j++) { C[i][j] = (DATA_TYPE) ((i*j+3)%n) / m; } }
/* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */
static void print_array(int n, DATA_TYPE POLYBENCH_2D(C,N,N,n,n)) { int i, j; POLYBENCH_DUMP_START; POLYBENCH_DUMP_BEGIN("C"); for (i = 0; i < n; i++) for (j = 0; j < n; j++) { if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n"); fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]); } POLYBENCH_DUMP_END("C"); POLYBENCH_DUMP_FINISH; }
/* Main computational kernel. The whole function will be timed, including the call and return. */
static void kernel_syr2k(int n, int m, DATA_TYPE alpha, DATA_TYPE beta, DATA_TYPE POLYBENCH_2D(C,N,N,n,n), DATA_TYPE POLYBENCH_2D(A,N,M,n,m), DATA_TYPE POLYBENCH_2D(B,N,M,n,m)) { int i, j, k;
//BLAS PARAMS
//UPLO = 'L'
//TRANS = 'N'
//A is NxM
//B is NxM
//C is NxN
#pragma scop
for (i = 0; i < _PB_N; i++) {
#pragma omp parallel for
for (j = 0; j <= i; j++) { C[i][j] *= beta; for (k = 0; k < _PB_M; k++) { C[i][j] += A[j][k]*alpha*B[i][k] + B[j][k]*alpha*A[i][k]; } } }
#pragma endscop
}
int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M;
double footprint = 8*(n*n + 2*n*m); // HAVERFORD added code
double FP_ops = 3.0 * m * (n + 1) * n; // HAVERFORD added code
#ifdef POLYBENCH_GFLOPS
polybench_set_program_flops(FP_ops); // HAVERFORD addition
#endif
#if defined POLYFORD_VERBOSE
printf("Starting %s, n=%8d, m=%8d, Footprint %8.4g M, Source FP ops=%8.4g G\n", __FILE__, n, m, footprint / (1024 * 1024), FP_ops/1000000000.0);
#endif
/* Variable declaration/allocation. */ DATA_TYPE alpha; DATA_TYPE beta; POLYBENCH_2D_ARRAY_DECL(C,DATA_TYPE,N,N,n,n); POLYBENCH_2D_ARRAY_DECL(A,DATA_TYPE,N,M,n,m); POLYBENCH_2D_ARRAY_DECL(B,DATA_TYPE,N,M,n,m); /* Initialize array(s). */ init_array (n, m, &alpha, &beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_syr2k (n, m, alpha, beta, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C))); /* Be clean. */ POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
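// --- Editor's sketch (hedged, not part of PolyBench) ------------------------
// The kernel above re-enters a parallel region once per outer iteration
// because the pragma sits on the inner j-loop. Hoisting the parallelism to the
// outer i-loop is safe here, since iteration i only writes row C[i][*]; the
// triangular iteration space then favors a guided schedule over a static one
// for load balance. Plain row-major double arrays stand in for the
// POLYBENCH_2D types; the function name is an assumption.
void kernel_syr2k_outer(int n, int m, double alpha, double beta,
                        double* C /* n x n */, const double* A /* n x m */,
                        const double* B /* n x m */) {
#pragma omp parallel for schedule(guided)
  for (int i = 0; i < n; i++)
    for (int j = 0; j <= i; j++) {
      double acc = C[i * n + j] * beta;
      for (int k = 0; k < m; k++)
        acc += A[j * m + k] * alpha * B[i * m + k]
             + B[j * m + k] * alpha * A[i * m + k];
      C[i * n + j] = acc; // lower triangle only (UPLO = 'L')
    }
}
// -----------------------------------------------------------------------------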
TaNra.h
// Copyright 2015 Christina Teflioudi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* * File: TANRA_all.h * Author: chteflio * * Created on August 24, 2015, 9:53 AM */
#ifndef TANRA_ALL_H
#define TANRA_ALL_H
namespace mips {
class TaNra : public Mip { ProbeBucket probeBucket, probeBucketK; LempArguments args; std::vector<RetrievalArguments> retrArg; inline void printAlgoName(const VectorMatrix& leftMatrix) { if (args.isTARR) { logging << "TA_NRA (RR)" << "\t" << args.threads << "\t"; std::cout << "[ALGORITHM] TA_NRA (ROUND ROBIN) with " << args.threads << " thread(s)" << std::endl; } else { logging << "TA_NRA (MAX)" << "\t" << args.threads << "\t"; std::cout << "[ALGORITHM] TA_NRA (MAX HEAP) with " << args.threads << " thread(s)" << std::endl; } logging << "P(" << probeMatrix.rowNum << "x" << (0 + probeMatrix.colNum) << ")\t"; logging << "Q^T(" << leftMatrix.rowNum << "x" << (0 + leftMatrix.colNum) << ")\t"; } inline void initializeInternal(std::vector<VectorMatrix>& queryMatrices, VectorMatrix& leftMatrix) { std::cout << "[RETRIEVAL] QueryMatrix contains " << leftMatrix.rowNum << " vectors with dimensionality " << (0 + leftMatrix.colNum) << std::endl; row_type myNumThreads = args.threads; if (leftMatrix.rowNum < args.threads) { myNumThreads = leftMatrix.rowNum; std::cout << "[WARNING] Query matrix contains too few elements. Suboptimal running with " << myNumThreads << " thread(s)" << std::endl; } omp_set_num_threads(myNumThreads); queryMatrices.resize(myNumThreads); splitMatrices(leftMatrix, queryMatrices); for (row_type i = 0; i < myNumThreads; i++) { retrArg[i].initializeBasics(queryMatrices[i], probeMatrix, LEMP_TANRA, args.theta, args.k, args.threads, args.R, args.epsilon, 0, 0, false, args.isTARR); retrArg[i].init(probeMatrix.rowNum); retrArg[i].clear(); } } public: inline void setTheta(double theta) { args.theta = theta; } inline TaNra(InputArguments& input, bool isTARR) { args.copyInputArguments(input); args.isTARR = isTARR;
// now do the logging
logging.open(args.logFile.c_str(), std::ios_base::app); if (!logging.is_open()) { std::cout << "[WARNING] No log will be created!" << std::endl; } else { std::cout << "[INFO] Logging in " << args.logFile << std::endl; } omp_set_num_threads(args.threads); retrArg.resize(args.threads); }; inline void initialize(VectorMatrix& rightMatrix) { std::cout << "[INIT] ProbeMatrix contains " << rightMatrix.rowNum << " vectors with dimensionality " << (0 + rightMatrix.colNum) << std::endl; probeMatrix = rightMatrix; timer.start(); probeBucket.init(probeMatrix, 0, probeMatrix.rowNum, args); // initialize
probeBucket.bucketScanThreshold = 0; probeBucket.normL2.second = std::numeric_limits<double>::max(); retriever_ptr rPtr(new tanraRetriever()); probeBucket.ptrRetriever = rPtr; if (probeBucket.ptrIndexes[SL] == 0) probeBucket.ptrIndexes[SL] = new QueueElementLists(); static_cast<QueueElementLists*> (probeBucket.ptrIndexes[SL])->initializeLists(probeMatrix, 0, probeMatrix.rowNum); timer.stop(); dataPreprocessingTimeRight += timer.elapsedTime().nanos(); } inline ~TaNra() { logging.close(); } inline void runAboveTheta(VectorMatrix& leftMatrix, Results& results) { printAlgoName(leftMatrix); std::vector<VectorMatrix> queryMatrices; initializeInternal(queryMatrices, leftMatrix); results.resultsVector.resize(args.threads); std::vector<row_type> blockOffsets; blockOffsets.push_back(0); std::cout << "[RETRIEVAL] Retrieval (theta = " << args.theta << ") starts ..." << std::endl; logging << "theta(" << args.theta << ")\t"; timer.start(); comp_type comparisons = 0;
#pragma omp parallel reduction(+ : comparisons)
{ row_type tid = omp_get_thread_num(); bucketize(retrArg[tid].queryBatches, queryMatrices[tid], blockOffsets, args); probeBucket.ptrRetriever->run(probeBucket, &retrArg[tid]); results.moveAppend(retrArg[tid].results, tid); comparisons += retrArg[tid].comparisons; } timer.stop(); retrievalTime += timer.elapsedTime().nanos(); totalComparisons += comparisons; std::cout << "[RETRIEVAL] ... and is finished with " << results.getResultSize() << " results" << std::endl; logging << results.getResultSize() << "\t"; outputStats(); } }; }
#endif /* TANRA_ALL_H */
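// --- Editor's sketch (hedged, not LEMP code) ---------------------------------
// runAboveTheta above combines per-thread state (retrArg[tid]) with an OpenMP
// reduction over the comparison counter. A minimal standalone version of that
// pattern; the per-thread vector and the counter increment are placeholders
// for the real retrieval work.
#include <omp.h>
#include <cstdint>
#include <vector>

inline std::int64_t per_thread_reduction_demo() {
  std::vector<std::vector<int>> perThreadResults(omp_get_max_threads());
  std::int64_t comparisons = 0;
#pragma omp parallel reduction(+ : comparisons)
  {
    const int tid = omp_get_thread_num();
    perThreadResults[tid].push_back(tid); // stands in for moveAppend(results)
    comparisons += 1;                     // stands in for retrArg[tid].comparisons
  }
  return comparisons;
}
// -----------------------------------------------------------------------------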
shortcut_layer.c
#include "shortcut_layer.h" #include "convolutional_layer.h" #include "dark_cuda.h" #include "blas.h" #include "utils.h" #include "gemm.h" #include <stdio.h> #include <assert.h> layer make_shortcut_layer(int batch, int n, int *input_layers, int* input_sizes, int w, int h, int c, float **layers_output, float **layers_delta, float **layers_output_gpu, float **layers_delta_gpu, WEIGHTS_TYPE_T weights_type, WEIGHTS_NORMALIZATION_T weights_normalization, ACTIVATION activation, int train) { fprintf(stderr, "Shortcut Layer: "); int i; for(i = 0; i < n; ++i) fprintf(stderr, "%d, ", input_layers[i]); layer l = { (LAYER_TYPE)0 }; l.train = train; l.type = SHORTCUT; l.batch = batch; l.activation = activation; l.n = n; l.input_layers = input_layers; l.input_sizes = input_sizes; l.layers_output = layers_output; l.layers_delta = layers_delta; l.weights_type = weights_type; l.weights_normalization = weights_normalization; l.learning_rate_scale = 1; // not necessary //l.w = w2; //l.h = h2; //l.c = c2; l.w = l.out_w = w; l.h = l.out_h = h; l.c = l.out_c = c; l.outputs = w*h*c; l.inputs = l.outputs; //if(w != w2 || h != h2 || c != c2) fprintf(stderr, " w = %d, w2 = %d, h = %d, h2 = %d, c = %d, c2 = %d \n", w, w2, h, h2, c, c2); l.index = l.input_layers[0]; if (train) l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float)); l.output = (float*)xcalloc(l.outputs * batch, sizeof(float)); l.nweights = 0; if (l.weights_type == PER_FEATURE) l.nweights = (l.n + 1); else if (l.weights_type == PER_CHANNEL) l.nweights = (l.n + 1) * l.c; if (l.nweights > 0) { l.weights = (float*)calloc(l.nweights, sizeof(float)); float scale = sqrt(2. / l.nweights); for (i = 0; i < l.nweights; ++i) l.weights[i] = 1;// +0.01*rand_uniform(-1, 1);// scale*rand_uniform(-1, 1); // rand_normal(); if (train) l.weight_updates = (float*)calloc(l.nweights, sizeof(float)); l.update = update_shortcut_layer; } l.forward = forward_shortcut_layer; l.backward = backward_shortcut_layer; #ifndef GPU if (l.activation == SWISH || l.activation == MISH) l.activation_input = (float*)calloc(l.batch*l.outputs, sizeof(float)); #endif // GPU #ifdef GPU if (l.activation == SWISH || l.activation == MISH) l.activation_input_gpu = cuda_make_array(l.activation_input, l.batch*l.outputs); l.forward_gpu = forward_shortcut_layer_gpu; l.backward_gpu = backward_shortcut_layer_gpu; if (l.nweights > 0) { l.update_gpu = update_shortcut_layer_gpu; l.weights_gpu = cuda_make_array(l.weights, l.nweights); if (train) l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights); } if (train) l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch); l.output_gpu = cuda_make_array(l.output, l.outputs*batch); l.input_sizes_gpu = cuda_make_int_array_new_api(input_sizes, l.n); l.layers_output_gpu = (float**)cuda_make_array_pointers((void**)layers_output_gpu, l.n); l.layers_delta_gpu = (float**)cuda_make_array_pointers((void**)layers_delta_gpu, l.n); #endif // GPU l.bflops = l.out_w * l.out_h * l.out_c * l.n / 1000000000.; if (l.weights_type) l.bflops *= 2; fprintf(stderr, " wt = %d, wn = %d, outputs:%4d x%4d x%4d %5.3f BF\n", l.weights_type, l.weights_normalization, l.out_w, l.out_h, l.out_c, l.bflops); return l; } void resize_shortcut_layer(layer *l, int w, int h, network *net) { //assert(l->w == l->out_w); //assert(l->h == l->out_h); l->w = l->out_w = w; l->h = l->out_h = h; l->outputs = w*h*l->out_c; l->inputs = l->outputs; if (l->train) l->delta = (float*)xrealloc(l->delta, l->outputs * l->batch * sizeof(float)); l->output = (float*)xrealloc(l->output, l->outputs * 
l->batch * sizeof(float)); int i; for (i = 0; i < l->n; ++i) { int index = l->input_layers[i]; l->input_sizes[i] = net->layers[index].outputs; l->layers_output[i] = net->layers[index].output; l->layers_delta[i] = net->layers[index].delta; assert(l->w == net->layers[index].out_w && l->h == net->layers[index].out_h); } if (l->activation == SWISH || l->activation == MISH) l->activation_input = (float*)realloc(l->activation_input, l->batch*l->outputs * sizeof(float)); #ifdef GPU cuda_free(l->output_gpu); l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch); if (l->train) { cuda_free(l->delta_gpu); l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch); } float **layers_output_gpu = (float **)calloc(l->n, sizeof(float *)); float **layers_delta_gpu = (float **)calloc(l->n, sizeof(float *)); for (i = 0; i < l->n; ++i) { const int index = l->input_layers[i]; layers_output_gpu[i] = net->layers[index].output_gpu; layers_delta_gpu[i] = net->layers[index].delta_gpu; } memcpy_ongpu(l->input_sizes_gpu, l->input_sizes, l->n * sizeof(int)); memcpy_ongpu(l->layers_output_gpu, layers_output_gpu, l->n * sizeof(float*)); memcpy_ongpu(l->layers_delta_gpu, layers_delta_gpu, l->n * sizeof(float*)); free(layers_output_gpu); free(layers_delta_gpu); if (l->activation == SWISH || l->activation == MISH) { cuda_free(l->activation_input_gpu); l->activation_input_gpu = cuda_make_array(l->activation_input, l->batch*l->outputs); } #endif } void forward_shortcut_layer(const layer l, network_state state) { int from_w = state.net.layers[l.index].w; int from_h = state.net.layers[l.index].h; int from_c = state.net.layers[l.index].c; if (l.nweights == 0 && l.n == 1 && from_w == l.w && from_h == l.h && from_c == l.c) { int size = l.batch * l.w * l.h * l.c; int i; #pragma omp parallel for for(i = 0; i < size; ++i) l.output[i] = state.input[i] + state.net.layers[l.index].output[i]; } else { shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes, l.layers_output, l.output, state.input, l.weights, l.nweights, l.weights_normalization); } //copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1); //shortcut_cpu(l.batch, from_w, from_h, from_c, state.net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.output); //activate_array(l.output, l.outputs*l.batch, l.activation); if (l.activation == SWISH) activate_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.output); else if (l.activation == MISH) activate_array_mish(l.output, l.outputs*l.batch, l.activation_input, l.output); else activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation); } void backward_shortcut_layer(const layer l, network_state state) { if (l.activation == SWISH) gradient_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.delta); else if (l.activation == MISH) gradient_array_mish(l.outputs*l.batch, l.activation_input, l.delta); else gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta); backward_shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes, l.layers_delta, state.delta, l.delta, l.weights, l.weight_updates, l.nweights, state.input, l.layers_output, l.weights_normalization); //axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1); //shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta); } void update_shortcut_layer(layer l, int batch, float learning_rate_init, float momentum, float decay) { if (l.nweights > 0) { float learning_rate = learning_rate_init*l.learning_rate_scale; 
//float momentum = a.momentum; //float decay = a.decay; //int batch = a.batch; axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1); axpy_cpu(l.nweights, learning_rate / batch, l.weight_updates, 1, l.weights, 1); scal_cpu(l.nweights, momentum, l.weight_updates, 1); } } #ifdef GPU void forward_shortcut_layer_gpu(const layer l, network_state state) { //copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1); //simple_copy_ongpu(l.outputs*l.batch, state.input, l.output_gpu); //shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu); //input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu); //----------- //if (l.outputs == l.input_sizes[0]) //if(l.n == 1 && l.nweights == 0) //{ // input_shortcut_gpu(state.input, l.batch, state.net.layers[l.index].w, state.net.layers[l.index].h, state.net.layers[l.index].c, // state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu); //} //else { shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_output_gpu, l.output_gpu, state.input, l.weights_gpu, l.nweights, l.weights_normalization); } if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu); else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu); else activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); } void backward_shortcut_layer_gpu(const layer l, network_state state) { if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu); else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu); else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); backward_shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_delta_gpu, state.delta, l.delta_gpu, l.weights_gpu, l.weight_updates_gpu, l.nweights, state.input, l.layers_output_gpu, l.weights_normalization); //axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1); //shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu); } void update_shortcut_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay, float loss_scale) { if (l.nweights > 0) { float learning_rate = learning_rate_init*l.learning_rate_scale / loss_scale; //float momentum = a.momentum; //float decay = a.decay; //int batch = a.batch; reset_nan_and_inf(l.weight_updates_gpu, l.nweights); fix_nan_and_inf(l.weights_gpu, l.nweights); //constrain_weight_updates_ongpu(l.nweights, 1, l.weights_gpu, l.weight_updates_gpu); constrain_ongpu(l.nweights, 1, l.weight_updates_gpu, 1); /* cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights); cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights); CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream())); for (int i = 0; i < l.nweights; ++i) printf(" %f, ", l.weight_updates[i]); printf(" l.nweights = %d - updates \n", l.nweights); for (int i = 0; i < l.nweights; ++i) printf(" %f, ", l.weights[i]); printf(" l.nweights = %d \n\n", l.nweights); */ //axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); 
scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1); //fill_ongpu(l.nweights, 0, l.weight_updates_gpu, 1); //if (l.clip) { // constrain_ongpu(l.nweights, l.clip, l.weights_gpu, 1); //} } } void pull_shortcut_layer(layer l) { constrain_ongpu(l.nweights, 1, l.weight_updates_gpu, 1); cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights); CHECK_CUDA(cudaPeekAtLastError()); CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream())); } void push_shortcut_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); CHECK_CUDA(cudaPeekAtLastError()); } #endif
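// --- Editor's sketch (hedged, not darknet code) ------------------------------
// forward_shortcut_layer above takes a fast path when the single source layer
// matches the output shape: a plain element-wise residual addition,
// parallelized with the same OpenMP pragma. The weighted path
// (shortcut_multilayer_cpu) generalizes this to several source layers with
// optional weight normalization. The function name and signature here are
// illustrative.
#include <cstddef>

inline void shortcut_add(const float* from, const float* input, float* output,
                         std::size_t n) {
#pragma omp parallel for
  for (long i = 0; i < static_cast<long>(n); ++i)
    output[i] = input[i] + from[i]; // residual connection: out = x + f(x)
}
// -----------------------------------------------------------------------------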
common.h
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <cstdio> #include <string> #include <vector> #include <sstream> #include <cstdint> #include <algorithm> #include <cmath> #include <functional> #include <memory> #include <iterator> #include <type_traits> #include <iomanip> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' 
|| *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', 
'9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = char(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t ) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t ) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } 
return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char* p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char* p, T* out) const { double tmp = 0.0; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values.
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (size_t i = 0; i < input.size(); ++i) { ret.push_back(input.at(i).get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; for (size_t i = start; i < keys.size(); ++i) { arr.emplace_back(keys[i], values[i]); } if (!is_reverse) { std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { keys[i] = arr[i].first; values[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) { std::vector<T*> ptr(data.size()); for (size_t i = 0; i < data.size(); ++i) { ptr[i] = data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (x >= 1e300) { return 1e300; } else if(x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static,1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static,1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
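// --- Editor's sketch (hedged) -------------------------------------------------
// Illustrative use of Common::ParallelSort from the header above: it sorts
// per-thread blocks with std::sort, then merges pairs of blocks in parallel,
// doubling the merged block size each round until the whole range is ordered.
// The include path is an assumption; adjust it to wherever this header lives
// in the build.
// #include <LightGBM/utils/common.h>
#include <vector>

inline void parallel_sort_demo() {
  std::vector<double> v = {3.0, 1.0, 2.0, 5.0, 4.0};
  // Descending order; any strict-weak-ordering predicate works.
  LightGBM::Common::ParallelSort(
      v.begin(), v.end(), [](double a, double b) { return a > b; });
}
// ------------------------------------------------------------------------------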
flush.c
int main (void)
{
  int a = 1, b = 2;
#pragma omp parallel if (a) num_threads(4)
  {
#pragma omp flush
    if (a != 0)
      {
#pragma omp flush(a,b)
      }
    if (a != 0)
      {
#pragma omp barrier
      }
  }
  return 0;
}
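// --- Editor's sketch (hedged, not part of the test case above) ---------------
// The canonical reason flush exists: publishing data through a flag between
// two threads. The producer flushes the payload before setting the flag; the
// consumer re-reads the flag and payload around its own flushes. This assumes
// the runtime actually grants two threads; with a single thread the spin loop
// would never terminate.
#include <cstdio>

int flush_handshake_demo() {
  int data = 0, flag = 0;
#pragma omp parallel sections num_threads(2)
  {
#pragma omp section
    {
      data = 42;
#pragma omp flush(data) // make the payload visible before the flag
      flag = 1;
#pragma omp flush(flag)
    }
#pragma omp section
    {
      int seen = 0;
      while (!seen) {
#pragma omp flush(flag)
        seen = flag;
      }
#pragma omp flush(data) // re-read the payload after observing the flag
      std::printf("data = %d\n", data);
    }
  }
  return 0;
}
// ------------------------------------------------------------------------------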
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImage method is: % % MagickBooleanType CompositeImage(Image *image, % const Image *composite_image,const CompositeOperator compose, % const MagickBooleanType clip_to_self,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the destination image, modified by he composition % % o composite_image: the composite (source) image. % % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o clip_to_self: set to MagickTrue to limit composition to area composed. % % o x_offset: the column offset of the composited image. 
% % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o exception: return any errors or warnings in this structure. % */ /* Composition based on the SVG specification: A Composition is defined by... Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc) Y = 1 for source preserved Z = 1 for destination preserved Conversion to transparency (then optimized) Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) Where... Sca = Sc*Sa normalized Source color divided by Source alpha Dca = Dc*Da normalized Dest color divided by Dest alpha Dc' = Dca'/Da' the desired color value for this channel. Da' is in the following formulas as 'gamma', the resulting alpha value. Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in the following optimizations... gamma = Sa+Da-Sa*Da; gamma = 1 - QuantumScale*alpha * QuantumScale*beta; opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma The above SVG definitions also dictate that Mathematical Composition methods should use an 'Over' blending mode for the Alpha Channel. It however was not applied for composition modes of 'Plus', 'Minus', the modulus versions of 'Add' and 'Subtract'. Mathematical operator changes to be applied from IM v6.7... 1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed 'ModulusAdd' and 'ModulusSubtract' for clarity. 2) All mathematical compositions work as per the SVG specification with regard to blending. This now includes 'ModulusAdd' and 'ModulusSubtract'. 3) When the special channel flag 'sync' (synchronize channel updates) is turned off (enabled by default) then mathematical compositions are only performed on the channels specified, and are applied independently of each other. In other words the mathematics is performed as 'pure' mathematical operations, rather than as image operations. */ static inline MagickRealType MagickMin(const MagickRealType x, const MagickRealType y) { if (x < y) return(x); return(y); } static inline MagickRealType MagickMax(const MagickRealType x, const MagickRealType y) { if (x > y) return(x); return(y); } static void HCLComposite(const MagickRealType hue,const MagickRealType chroma, const MagickRealType luma,MagickRealType *red,MagickRealType *green, MagickRealType *blue) { MagickRealType b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace.
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma, MagickRealType *luma) { MagickRealType b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (MagickRealType *) NULL); assert(chroma != (MagickRealType *) NULL); assert(luma != (MagickRealType *) NULL); r=red; g=green; b=blue; max=MagickMax(r,MagickMax(g,b)); c=max-(MagickRealType) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == max) h=fmod((g-b)/c+6.0,6.0); else if (green == max) h=((b-r)/c)+2.0; else if (blue == max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static MagickBooleanType CompositeOverImage(Image *image, const Image *composite_image,const MagickBooleanType clip_to_self, const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *composite_view, *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Composite image. */ status=MagickTrue; progress=0; composite_view=AcquireVirtualCacheView(composite_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(composite_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; register const Quantum *restrict p; register Quantum *restrict q; register ssize_t x; size_t channels; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) composite_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) composite_image->rows)) { p=GetCacheViewVirtualPixels(composite_view,0,y-y_offset, composite_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(composite_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Sa, Sc; register ssize_t i; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) composite_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) composite_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: destination color. 
*/ if (GetPixelReadMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } (void) GetOneVirtualPixel(composite_image,x-x_offset,y-y_offset, source,exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait composite_traits=GetPixelChannelTraits(composite_image, channel); if ((traits == UndefinedPixelTrait) || (composite_traits == UndefinedPixelTrait)) continue; q[i]=source[channel]; } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized destination alpha. */ if (GetPixelReadMask(composite_image,p) == 0) { p+=GetPixelChannels(composite_image); channels=GetPixelChannels(composite_image); if (p >= (pixels+channels*composite_image->columns)) p=pixels; q+=GetPixelChannels(image); continue; } Sa=QuantumScale*GetPixelAlpha(composite_image,p); Da=QuantumScale*GetPixelAlpha(image,q); alpha=Sa*(-Da)+Sa+Da; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait composite_traits=GetPixelChannelTraits(composite_image, channel); if ((traits == UndefinedPixelTrait) || (composite_traits == UndefinedPixelTrait)) continue; if ((traits & CopyPixelTrait) != 0) { if (channel != AlphaPixelChannel) { /* Copy channel. */ q[i]=GetPixelChannel(composite_image,channel,p); continue; } /* Set alpha channel. */ q[i]=ClampToQuantum(QuantumRange*alpha); continue; } /* Sc: source color. Dc: destination color. */ Sc=(MagickRealType) GetPixelChannel(composite_image,channel,p); Dc=(MagickRealType) q[i]; gamma=PerceptibleReciprocal(alpha); q[i]=ClampToQuantum(gamma*(Sa*Sc-Sa*Da*Dc+Da*Dc)); } p+=GetPixelChannels(composite_image); channels=GetPixelChannels(composite_image); if (p >= (pixels+channels*composite_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType CompositeImage(Image *image, const Image *composite,const CompositeOperator compose, const MagickBooleanType clip_to_self,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *composite_view, *image_view; GeometryInfo geometry_info; Image *composite_image, *destination_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType amount, destination_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite!= (Image *) NULL); assert(composite->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); composite_image=CloneImage(composite,0,0,MagickTrue,exception); if (composite_image == (const Image *) NULL) return(MagickFalse); 
if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); (void) SetImageColorspace(composite_image,image->colorspace,exception); if ((image->alpha_trait == BlendPixelTrait) && (composite_image->alpha_trait != BlendPixelTrait)) (void) SetImageAlphaChannel(composite_image,SetAlphaChannel,exception); if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,composite_image,clip_to_self,x_offset, y_offset,exception); composite_image=DestroyImage(composite_image); return(status); } destination_image=(Image *) NULL; amount=0.5; destination_dissolve=1.0; percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) composite_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) composite_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; composite_view=AcquireVirtualCacheView(composite_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(composite_image,image,composite_image->rows,1) #endif for (y=0; y < (ssize_t) composite_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, composite_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) composite_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(composite_image,p) == 0) { p+=GetPixelChannels(composite_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(composite_image); i++) { PixelChannel channel=GetPixelChannelChannel(composite_image,i); PixelTrait composite_traits=GetPixelChannelTraits(composite_image, channel); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (composite_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(composite_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } composite_view=DestroyCacheView(composite_view); image_view=DestroyCacheView(image_view); composite_image=DestroyImage(composite_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: case IntensityCompositeOp: { /* Modify destination outside the overlaid region and require an alpha channel to exist, to add transparency. 
      */
      if (image->alpha_trait != BlendPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case BlurCompositeOp:
    {
      CacheView
        *composite_view,
        *destination_view;

      const char
        *value;

      MagickRealType
        angle_range,
        angle_start,
        height,
        width;

      PixelInfo
        pixel;

      ResampleFilter
        *resample_filter;

      SegmentInfo
        blur;

      /*
        Blur Image by resampling.  The blur is dictated by an overlay
        gradient map: X = red_channel; Y = green_channel;
        compose:args = x_scale[,y_scale[,angle]].
      */
      destination_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      if (destination_image == (Image *) NULL)
        {
          composite_image=DestroyImage(composite_image);
          return(MagickFalse);
        }
      /*
        Gather the maximum blur sigma values from the user.
      */
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(image,"compose:args");
      if (value != (const char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & WidthValue) == 0)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionWarning,"InvalidSetting","'%s' '%s'","compose:args",
            value != (const char *) NULL ? value : "");
          composite_image=DestroyImage(composite_image);
          destination_image=DestroyImage(destination_image);
          return(MagickFalse);
        }
      /*
        The user's input sigma now needs to be converted to the EWA ellipse
        size.  The filter defaults to a sigma of 0.5, so to match the user's
        input the ellipse size needs to be doubled.
      */
      width=height=geometry_info.rho*2.0;
      if ((flags & HeightValue) != 0)
        height=geometry_info.sigma*2.0;
      /*
        Default the unrotated ellipse width and height axis vectors.
      */
      blur.x1=width;
      blur.x2=0.0;
      blur.y1=0.0;
      blur.y2=height;
      /*
        Rotate the vectors if a rotation angle is given.
      */
      if ((flags & XValue) != 0)
        {
          MagickRealType
            angle;

          angle=DegreesToRadians(geometry_info.xi);
          blur.x1=width*cos(angle);
          blur.x2=width*sin(angle);
          blur.y1=(-height*sin(angle));
          blur.y2=height*cos(angle);
        }
      /*
        Otherwise set an angle range and calculate the rotation in the loop.
      */
      angle_start=0.0;
      angle_range=0.0;
      if ((flags & YValue) != 0)
        {
          angle_start=DegreesToRadians(geometry_info.xi);
          angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
        }
      /*
        Set up a Gaussian cylindrical filter for EWA blurring.  With a
        minimum ellipse radius of support*1.0, the EWA algorithm can only
        produce a minimum blur of 0.5 for a Gaussian (support=2.0).  This
        means that even 'no blur' will still be a little blurry!  The
        solution (which also prevents any expert filter settings from the
        user leaking through) is to set our own filter settings, then
        restore them afterwards.
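        (The unrotated ellipse axes set up here are scaled per pixel in the
        loop below, via ScaleResampleFilter(), by the overlay's red and
        green channel values.)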
      */
      resample_filter=AcquireResampleFilter(image,exception);
      SetResampleFilter(resample_filter,GaussianFilter);
      /*
        Do the variable blurring of each pixel in the image.
      */
      GetPixelInfo(image,&pixel);
      composite_view=AcquireVirtualCacheView(composite_image,exception);
      destination_view=AcquireAuthenticCacheView(destination_image,exception);
      for (y=0; y < (ssize_t) composite_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *restrict p;

        register Quantum
          *restrict q;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
          1,exception);
        q=QueueCacheViewAuthenticPixels(destination_view,0,y,
          destination_image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) composite_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p+=GetPixelChannels(composite_image);
              continue;
            }
          if (fabs(angle_range) > MagickEpsilon)
            {
              MagickRealType
                angle;

              angle=angle_start+angle_range*QuantumScale*
                GetPixelBlue(composite_image,p);
              blur.x1=width*cos(angle);
              blur.x2=width*sin(angle);
              blur.y1=(-height*sin(angle));
              blur.y2=height*cos(angle);
            }
#if 0
          if ( x == 10 && y == 60 )
            {
              (void) fprintf(stderr,"blur.x=%lf,%lf, blur.y=%lf,%lf\n",
                blur.x1,blur.x2,blur.y1,blur.y2);
              (void) fprintf(stderr,"scaled by=%lf,%lf\n",QuantumScale*
                GetPixelRed(composite_image,p),QuantumScale*
                GetPixelGreen(composite_image,p));
            }
#endif
          ScaleResampleFilter(resample_filter,
            blur.x1*QuantumScale*GetPixelRed(composite_image,p),
            blur.y1*QuantumScale*GetPixelGreen(composite_image,p),
            blur.x2*QuantumScale*GetPixelRed(composite_image,p),
            blur.y2*QuantumScale*GetPixelGreen(composite_image,p));
          (void) ResamplePixelColor(resample_filter,(double) x_offset+x,
            (double) y_offset+y,&pixel,exception);
          SetPixelInfoPixel(destination_image,&pixel,q);
          p+=GetPixelChannels(composite_image);
          q+=GetPixelChannels(destination_image);
        }
        sync=SyncCacheViewAuthenticPixels(destination_view,exception);
        if (sync == MagickFalse)
          break;
      }
      resample_filter=DestroyResampleFilter(resample_filter);
      composite_view=DestroyCacheView(composite_view);
      destination_view=DestroyCacheView(destination_view);
      composite_image=DestroyImage(composite_image);
      composite_image=destination_image;
      break;
    }
    case DisplaceCompositeOp:
    case DistortCompositeOp:
    {
      CacheView
        *composite_view,
        *destination_view,
        *image_view;

      const char
        *value;

      PixelInfo
        pixel;

      MagickRealType
        horizontal_scale,
        vertical_scale;

      PointInfo
        center,
        offset;

      /*
        Displace/Distort based on overlay gradient map:
          X = red_channel;  Y = green_channel;
          compose:args = x_scale[,y_scale[,center.x,center.y]]
      */
      destination_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      if (destination_image == (Image *) NULL)
        {
          composite_image=DestroyImage(composite_image);
          return(MagickFalse);
        }
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(image,"compose:args");
      if (value != (const char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & (WidthValue|HeightValue)) == 0)
        {
          if ((flags & AspectValue) == 0)
            {
              horizontal_scale=(MagickRealType) (composite_image->columns-1.0)/
                2.0;
              vertical_scale=(MagickRealType) (composite_image->rows-1.0)/2.0;
            }
          else
            {
              horizontal_scale=(MagickRealType) (image->columns-1.0)/2.0;
              vertical_scale=(MagickRealType) (image->rows-1.0)/2.0;
            }
        }
      else
        {
          horizontal_scale=geometry_info.rho;
          vertical_scale=geometry_info.sigma;
          if ((flags & PercentValue) != 0)
            {
              if ((flags & AspectValue) == 0)
                {
                  horizontal_scale*=(composite_image->columns-1.0)/200.0;
                  vertical_scale*=(composite_image->rows-1.0)/200.0;
                }
              else
                {
                  horizontal_scale*=(image->columns-1.0)/200.0;
                  vertical_scale*=(image->rows-1.0)/200.0;
                }
            }
          if ((flags & HeightValue) == 0)
            vertical_scale=horizontal_scale;
        }
      /*
        Determine the fixed center point for the absolute distortion map.
          Absolute distort == Displace offset relative to a fixed absolute
          point.  Select that point according to the +X+Y user inputs:
            default      = center of overlay image
            arg flag '!' = locations/percentage relative to background image
      */
      center.x=(MagickRealType) x_offset;
      center.y=(MagickRealType) y_offset;
      if (compose == DistortCompositeOp)
        {
          if ((flags & XValue) == 0)
            if ((flags & AspectValue) == 0)
              center.x=(MagickRealType) (x_offset+(composite_image->columns-1)/
                2.0);
            else
              center.x=(MagickRealType) ((image->columns-1)/2);
          else
            if ((flags & AspectValue) == 0)
              center.x=(MagickRealType) x_offset+geometry_info.xi;
            else
              center.x=geometry_info.xi;
          if ((flags & YValue) == 0)
            if ((flags & AspectValue) == 0)
              center.y=(MagickRealType) (y_offset+(composite_image->rows-1)/
                2.0);
            else
              center.y=(MagickRealType) ((image->rows-1)/2);
          else
            if ((flags & AspectValue) == 0)
              center.y=(MagickRealType) y_offset+geometry_info.psi;
            else
              center.y=geometry_info.psi;
        }
      /*
        Shift the pixel offset point as defined by the provided
        displacement/distortion map -- like a lens...
      */
      GetPixelInfo(image,&pixel);
      image_view=AcquireVirtualCacheView(image,exception);
      composite_view=AcquireVirtualCacheView(composite_image,exception);
      destination_view=AcquireAuthenticCacheView(destination_image,exception);
      for (y=0; y < (ssize_t) composite_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *restrict p;

        register Quantum
          *restrict q;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(composite_view,0,y,composite_image->columns,
          1,exception);
        q=QueueCacheViewAuthenticPixels(destination_view,0,y,
          destination_image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) composite_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p+=GetPixelChannels(composite_image);
              continue;
            }
          /*
            Displace the offset.
          */
          offset.x=(double) (horizontal_scale*(GetPixelRed(composite_image,p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
            x : 0);
          offset.y=(double) (vertical_scale*(GetPixelGreen(composite_image,p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
            y : 0);
          (void) InterpolatePixelInfo(image,image_view,
            UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
            &pixel,exception);
          /*
            Mask with the 'invalid pixel mask' in the alpha channel.
          */
          pixel.alpha=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale*
            pixel.alpha)*(1.0-QuantumScale*GetPixelAlpha(composite_image,p)));
          SetPixelInfoPixel(destination_image,&pixel,q);
          p+=GetPixelChannels(composite_image);
          q+=GetPixelChannels(destination_image);
        }
        sync=SyncCacheViewAuthenticPixels(destination_view,exception);
        if (sync == MagickFalse)
          break;
      }
      destination_view=DestroyCacheView(destination_view);
      composite_view=DestroyCacheView(composite_view);
      image_view=DestroyCacheView(image_view);
      composite_image=DestroyImage(composite_image);
      composite_image=destination_image;
      break;
    }
    case DissolveCompositeOp:
    {
      const char
        *value;

      /*
        Convert the geometry arguments to dissolve factors.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (const char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          destination_dissolve=1.0;
          if ((source_dissolve-MagickEpsilon) < 0.0)
            source_dissolve=0.0;
          if ((source_dissolve+MagickEpsilon) > 1.0)
            {
              destination_dissolve=2.0-source_dissolve;
              source_dissolve=1.0;
            }
          if ((flags & SigmaValue) != 0)
            destination_dissolve=geometry_info.sigma/100.0;
          if ((destination_dissolve-MagickEpsilon) < 0.0)
            destination_dissolve=0.0;
        }
      break;
    }
    case BlendCompositeOp:
    {
      const char
        *value;

      value=GetImageArtifact(image,"compose:args");
      if (value != (const char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          destination_dissolve=1.0-source_dissolve;
          if ((flags & SigmaValue) != 0)
            destination_dissolve=geometry_info.sigma/100.0;
        }
      break;
    }
    case MathematicsCompositeOp:
    {
      const char
        *value;

      /*
        Just collect the values from the "compose:args" setting.  Unused
        values are set to zero automagically.

        Arguments are normally a comma-separated list, so this probably
        should be changed to some general comma-list parser (with a minimum
        number of values).
      */
      SetGeometryInfo(&geometry_info);
      value=GetImageArtifact(image,"compose:args");
      if (value != (const char *) NULL)
        (void) ParseGeometry(value,&geometry_info);
      break;
    }
    case ModulateCompositeOp:
    {
      const char
        *value;

      /*
        Determine the luma and chroma scale.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (const char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          percent_luma=geometry_info.rho;
          if ((flags & SigmaValue) != 0)
            percent_chroma=geometry_info.sigma;
        }
      break;
    }
    case ThresholdCompositeOp:
    {
      const char
        *value;

      /*
        Determine the amount and threshold.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (const char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          amount=geometry_info.rho;
          threshold=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            threshold=0.05f;
        }
      threshold*=QuantumRange;
      break;
    }
    default:
      break;
  }
  /*
    Composite image.
*/ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; composite_view=AcquireVirtualCacheView(composite_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(composite_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, luma, green, hue, red, chroma; PixelInfo destination_pixel, source_pixel; register const Quantum *restrict p; register Quantum *restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) composite_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) composite_image->rows)) { p=GetCacheViewVirtualPixels(composite_view,0,y-y_offset, composite_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(composite_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&destination_pixel); GetPixelInfo(composite_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) composite_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) composite_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: destination color. */ (void) GetOneVirtualPixel(composite_image,x-x_offset,y-y_offset, source,exception); if (GetPixelReadMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait composite_traits=GetPixelChannelTraits(composite_image, channel); if ((traits == UndefinedPixelTrait) || (composite_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case IntensityCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { pixel=(MagickRealType) q[i]; if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) { pixel=(MagickRealType) TransparentAlpha; break; } pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) { pixel=destination_dissolve*GetPixelAlpha(composite_image, source); break; } pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized destination alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(composite_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(composite_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case ModulusAddCompositeOp: case ModulusSubtractCompositeOp: case MultiplyCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ScreenCompositeOp: case SoftLightCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DarkenCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case LightenCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-destination_dissolve*Da)+source_dissolve* Sa+destination_dissolve*Da; break; } case DstOverCompositeOp: { alpha=Da*(-Sa)+Da+Sa; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa*(-Da)+Sa+Da; break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(Sa+Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } default: { alpha=1.0; break; } } if (GetPixelReadMask(image,q) == 0) { p+=GetPixelChannels(composite_image); q+=GetPixelChannels(image); continue; } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(composite_image,p,&source_pixel); GetPixelInfoPixel(image,q,&destination_pixel); break; } default: break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait composite_traits=GetPixelChannelTraits(composite_image, channel); if (traits == UndefinedPixelTrait) continue; if ((compose != IntensityCompositeOp) && (composite_traits == UndefinedPixelTrait)) continue; /* Sc: source color. Dc: destination color. */ Sc=(MagickRealType) GetPixelChannel(composite_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { if (channel != AlphaPixelChannel) { /* Copy channel. */ q[i]=ClampToQuantum(Sc); continue; } /* Set alpha channel. 
*/ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da > ((MagickRealType) QuantumRange/2.0)) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(composite_image,p,image,q); if (equivalent != MagickFalse) { pixel=(MagickRealType) TransparentAlpha; break; } pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs(QuantumRange*Da-TransparentAlpha) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { pixel=QuantumRange*Sa; if (composite_image->alpha_trait != BlendPixelTrait) pixel=GetPixelIntensity(composite_image,p); break; } case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { pixel=(1.0-Sa)*GetPixelIntensity(composite_image,p) < (1.0-Da)*GetPixelIntensity(image,q) ? Sa : Da; break; } case IntensityCompositeOp: { pixel=GetPixelIntensity(composite_image,p); break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(composite_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Da; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=ClampToQuantum(pixel); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized destination color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=1.0-alpha; break; } default: break; } gamma=PerceptibleReciprocal(alpha); pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=Sc*Sa+Dc*(1.0-Sa); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+destination_dissolve*Da*Dc); break; } case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(composite_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { /* Refer to the March 2009 SVG specification. 
*/ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca < MagickEpsilon) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca* (1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon) { pixel=Dc; break; } if (fabs(QuantumRange*Da-TransparentAlpha) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(destination_pixel.red,destination_pixel.green, destination_pixel.blue,&sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: case IntensityCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) (QuantumRange- GetPixelBlack(composite_image,p)); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(composite_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(composite_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(composite_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if (Sc < Dc) { pixel=gamma*(Sa*Sc-Sa*Da*Dc+Da*Dc); break; } pixel=gamma*(Da*Dc-Da*Sa*Sc+Sa*Sc); break; } case DarkenIntensityCompositeOp: { pixel=(1.0-Sa)*GetPixelIntensity(composite_image,p) < (1.0-Da)*GetPixelIntensity(image,q) ? 
            Sc : Dc;
          break;
        }
        case DifferenceCompositeOp:
        {
          pixel=gamma*(Sa*Sc+Da*Dc-Sa*Da*2.0*MagickMin(Sc,Dc));
          break;
        }
        case DissolveCompositeOp:
        {
          pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
            destination_dissolve*Da*Dc+destination_dissolve*Da*Dc);
          break;
        }
        case DivideDstCompositeOp:
        {
          if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon))
            {
              pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
              break;
            }
          if (fabs(Dca) < MagickEpsilon)
            {
              pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
              break;
            }
          pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
          break;
        }
        case DivideSrcCompositeOp:
        {
          if ((fabs(Dca) < MagickEpsilon) && (fabs(Sca) < MagickEpsilon))
            {
              pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
              break;
            }
          if (fabs(Sca) < MagickEpsilon)
            {
              pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
              break;
            }
          pixel=QuantumRange*gamma*(Dca*Sa*Sa/Sca+Dca*(1.0-Sa)+Sca*(1.0-Da));
          break;
        }
        case DstAtopCompositeOp:
        {
          pixel=Dc*Da+Sc*(1.0-Da);
          break;
        }
        case DstCompositeOp:
        case NoCompositeOp:
        {
          pixel=Dc;
          break;
        }
        case DstInCompositeOp:
        {
          pixel=gamma*(Sa*Dc*Da);
          break;
        }
        case DstOutCompositeOp:
        {
          pixel=gamma*(Da*Dc*(1.0-Sa));
          break;
        }
        case DstOverCompositeOp:
        {
          pixel=gamma*(Da*Dc-Da*Sa*Sc+Sa*Sc);
          break;
        }
        case ExclusionCompositeOp:
        {
          pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
            Dca*(1.0-Sa));
          break;
        }
        case HardLightCompositeOp:
        {
          if ((2.0*Sca) < Sa)
            {
              pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*
                (1.0-Sa));
              break;
            }
          pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
            Dca*(1.0-Sa));
          break;
        }
        case HardMixCompositeOp:
        {
          double
            gamma;

          if ((Sa+Da) < 1.0)
            gamma=0.0;
          else
            gamma=1.0;
          pixel=(gamma*(1.0-Sca)*(1.0-Dca))+Sa*(1.0-Sca)*Dca+Da*(1.0-Dca)*Sca;
          break;
        }
        case HueCompositeOp:
        {
          if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon)
            {
              pixel=Dc;
              break;
            }
          if (fabs(QuantumRange*Da-TransparentAlpha) < MagickEpsilon)
            {
              pixel=Sc;
              break;
            }
          CompositeHCL(destination_pixel.red,destination_pixel.green,
            destination_pixel.blue,&hue,&chroma,&luma);
          CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
            &hue,&sans,&sans);
          HCLComposite(hue,chroma,luma,&red,&green,&blue);
          switch (channel)
          {
            case RedPixelChannel: pixel=red; break;
            case GreenPixelChannel: pixel=green; break;
            case BluePixelChannel: pixel=blue; break;
            default: pixel=Dc; break;
          }
          break;
        }
        case InCompositeOp:
        case SrcInCompositeOp:
        {
          pixel=gamma*(Sa*Sc*Da);
          break;
        }
        case LinearBurnCompositeOp:
        {
          /*
            LinearBurn: as defined by Adobe Photoshop, according to
            http://www.simplefilter.de/en/basics/mixmods.html is:

              f(Sc,Dc) = Sc + Dc - 1
          */
          pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
          break;
        }
        case LinearDodgeCompositeOp:
        {
          pixel=gamma*(Sa*Sc+Da*Dc);
          break;
        }
        case LinearLightCompositeOp:
        {
          /*
            LinearLight: as defined by Adobe Photoshop, according to
            http://www.simplefilter.de/en/basics/mixmods.html is:

              f(Sc,Dc) = Dc + 2*Sc - 1
          */
          pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
          break;
        }
        case LightenCompositeOp:
        {
          if (Sc > Dc)
            {
              pixel=gamma*(Sa*Sc-Sa*Da*Dc+Da*Dc);
              break;
            }
          pixel=gamma*(Da*Dc-Da*Sa*Sc+Sa*Sc);
          break;
        }
        case LightenIntensityCompositeOp:
        {
          /*
            Lighten is equivalent to a 'Maximum' method
              OR a greyscale version of a binary 'And'
              OR the 'Union' of pixel sets.
          */
          pixel=Sa*GetPixelIntensity(composite_image,p) >
            Da*GetPixelIntensity(image,q) ?
            Sc : Dc;
          break;
        }
        case LuminizeCompositeOp:
        {
          if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon)
            {
              pixel=Dc;
              break;
            }
          if (fabs(QuantumRange*Da-TransparentAlpha) < MagickEpsilon)
            {
              pixel=Sc;
              break;
            }
          CompositeHCL(destination_pixel.red,destination_pixel.green,
            destination_pixel.blue,&hue,&chroma,&luma);
          CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
            &sans,&sans,&luma);
          HCLComposite(hue,chroma,luma,&red,&green,&blue);
          switch (channel)
          {
            case RedPixelChannel: pixel=red; break;
            case GreenPixelChannel: pixel=green; break;
            case BluePixelChannel: pixel=blue; break;
            default: pixel=Dc; break;
          }
          break;
        }
        case MathematicsCompositeOp:
        {
          /*
            'Mathematics' is a free-form, user-controlled mathematical
            composition, defined as...

              f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

            Where the arguments A,B,C,D are (currently) passed to composite
            as a comma-separated 'geometry' string in the "compose:args"
            image artifact.

              A = a->rho,   B = a->sigma,  C = a->xi,  D = a->psi

            Applying the SVG transparency formula (see above), we get...

              Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

              Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
                Dca*(1.0-Sa)
          */
          pixel=gamma*(geometry_info.rho*QuantumScale*Sa*Sc*Da*Dc+
            geometry_info.sigma*Sa*Sc*Da+geometry_info.xi*Da*Dc*Sa+
            geometry_info.psi*QuantumRange*Sa*Da+Sa*Sc*(1.0-Da)+
            Da*Dc*(1.0-Sa));
          break;
        }
        case MinusDstCompositeOp:
        {
          pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
          break;
        }
        case MinusSrcCompositeOp:
        {
          /*
            Minus source from destination.

              f(Sc,Dc) = Dc - Sc
          */
          pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
          break;
        }
        case ModulateCompositeOp:
        {
          ssize_t
            offset;

          if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon)
            {
              pixel=Dc;
              break;
            }
          offset=(ssize_t) (GetPixelIntensity(composite_image,p)-midpoint);
          if (offset == 0)
            {
              pixel=Dc;
              break;
            }
          CompositeHCL(destination_pixel.red,destination_pixel.green,
            destination_pixel.blue,&hue,&chroma,&luma);
          luma+=(0.01*percent_luma*offset)/midpoint;
          chroma*=0.01*percent_chroma;
          HCLComposite(hue,chroma,luma,&red,&green,&blue);
          switch (channel)
          {
            case RedPixelChannel: pixel=red; break;
            case GreenPixelChannel: pixel=green; break;
            case BluePixelChannel: pixel=blue; break;
            default: pixel=Dc; break;
          }
          break;
        }
        case ModulusAddCompositeOp:
        {
          pixel=Sc+Dc;
          if (pixel > QuantumRange)
            pixel-=QuantumRange;
          pixel=gamma*(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
          break;
        }
        case ModulusSubtractCompositeOp:
        {
          pixel=Sc-Dc;
          if (pixel < 0.0)
            pixel+=QuantumRange;
          pixel=gamma*(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
          break;
        }
        case MultiplyCompositeOp:
        {
          pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
          break;
        }
        case OutCompositeOp:
        case SrcOutCompositeOp:
        {
          pixel=gamma*(Sa*Sc*(1.0-Da));
          break;
        }
        case OverCompositeOp:
        case SrcOverCompositeOp:
        {
          pixel=gamma*(Sa*Sc-Sa*Da*Dc+Da*Dc);
          break;
        }
        case OverlayCompositeOp:
        {
          if ((2.0*Dca) < Da)
            {
              pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*
                (1.0-Da));
              break;
            }
          pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
            Sca*(1.0-Da));
          break;
        }
        case PegtopLightCompositeOp:
        {
          /*
            PegTop: a Soft-Light alternative: a continuous version of the
            Softlight function, producing very similar results.

              f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

            http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/ if (fabs(Da) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sca); break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case SaturateCompositeOp: { if (fabs(QuantumRange*Sa-TransparentAlpha) < MagickEpsilon) { pixel=Dc; break; } if (fabs(QuantumRange*Da-TransparentAlpha) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(destination_pixel.red,destination_pixel.green, destination_pixel.blue,&hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftLightCompositeOp: { /* Refer to the March 2009 SVG specification. */ if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-(Dca/Da)))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*(Dca/Da)* (4.0*(Dca/Da)+1.0)*((Dca/Da)-1.0)+7.0*(Dca/Da))+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow((Dca/Da),0.5)- (Dca/Da))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 
                 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
          */
          if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
            {
              pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
              break;
            }
          if ((2.0*Sca) <= Sa)
            {
              pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*
                (1.0-Da)+Dca*(1.0-Sa));
              break;
            }
          pixel=QuantumRange*gamma*(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+
            Dca*(1.0-Sa));
          break;
        }
        case XorCompositeOp:
        {
          pixel=gamma*(Sc*Sa*(1.0-Da)+Dc*Da*(1.0-Sa));
          break;
        }
        default:
        {
          pixel=Sc;
          break;
        }
      }
      q[i]=ClampToQuantum(pixel);
    }
    p+=GetPixelChannels(composite_image);
    channels=GetPixelChannels(composite_image);
    if (p >= (pixels+channels*composite_image->columns))
      p=pixels;
    q+=GetPixelChannels(image);
  }
  if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
    status=MagickFalse;
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    {
      MagickBooleanType
        proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CompositeImage)
#endif
      proceed=SetImageProgress(image,CompositeImageTag,progress++,
        image->rows);
      if (proceed == MagickFalse)
        status=MagickFalse;
    }
  }
  composite_view=DestroyCacheView(composite_view);
  image_view=DestroyCacheView(image_view);
  if (destination_image != (Image *) NULL)
    destination_image=DestroyImage(destination_image);
  else
    composite_image=DestroyImage(composite_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T e x t u r e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TextureImage() repeatedly tiles the texture image across and down the image
%  canvas.
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o texture: this image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait == BlendPixelTrait) ||
       (texture_image->alpha_trait == BlendPixelTrait)))
    {
      /*
        Tile texture onto the image background.
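        This is the general path: each tile is composited with
        CompositeImage() so the image's compose method and any alpha
        blending are honoured; the optimized direct-copy path below handles
        the plain copy case.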
*/ for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows) { register ssize_t x; if (status == MagickFalse) continue; for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { MagickBooleanType thread_status; thread_status=CompositeImage(image,texture_image,image->compose, MagickFalse,x+texture_image->tile_offset.x,y+ texture_image->tile_offset.y,exception); if (thread_status == MagickFalse) { status=thread_status; break; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType) image->rows,image->rows); texture_image=DestroyImage(texture_image); return(status); } /* Tile texture onto the image background (optimized). */ status=MagickTrue; texture_view=AcquireVirtualCacheView(texture_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(texture_image,image,1,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const Quantum *p, *pixels; register ssize_t x; register Quantum *q; size_t width; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x, (y+texture_image->tile_offset.y) % texture_image->rows, texture_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { register ssize_t j; p=pixels; width=texture_image->columns; if ((x+(ssize_t) width) > (ssize_t) image->columns) width=image->columns-x; for (j=0; j < (ssize_t) width; j++) { register ssize_t i; if (GetPixelReadMask(image,q) == 0) { p+=GetPixelChannels(texture_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++) { PixelChannel channel=GetPixelChannelChannel(texture_image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait texture_traits=GetPixelChannelTraits(texture_image, channel); if ((traits == UndefinedPixelTrait) || (texture_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(texture_image); q+=GetPixelChannels(image); } } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } texture_view=DestroyCacheView(texture_view); image_view=DestroyCacheView(image_view); texture_image=DestroyImage(texture_image); return(status); }
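
/*
  A usage sketch (not part of MagickCore; kept disabled here): tile a texture
  over a canvas and then blend an overlay, passing extra arguments through
  the "compose:args" artifact as described above.  The images `canvas`,
  `texture`, and `overlay` are assumed to have been acquired elsewhere;
  error handling is abbreviated.
*/
#if 0
static MagickBooleanType ExampleTextureAndBlend(Image *canvas,
  const Image *texture,const Image *overlay,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Tile the texture across and down the canvas.
  */
  status=TextureImage(canvas,texture,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Blend 60% of the overlay with 40% of the canvas; BlendCompositeOp reads
    its dissolve factors from the "compose:args" artifact (see above).
  */
  (void) SetImageArtifact(canvas,"compose:args","60x40");
  return(CompositeImage(canvas,overlay,BlendCompositeOp,MagickTrue,0,0,
    exception));
}
#endif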
convertCloud.h
/****************************************************************************
**
** Copyright (C) 2017 TU Wien, ACIN, Vision 4 Robotics (V4R) group
** Contact: v4r.acin.tuwien.ac.at
**
** This file is part of V4R
**
** V4R is distributed under dual licenses - GPLv3 or closed source.
**
** GNU General Public License Usage
** V4R is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published
** by the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** V4R is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
**
** Please review the following information to ensure the GNU General Public
** License requirements will be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
**
** Commercial License Usage
** If GPL is not suitable for your project, you must purchase a commercial
** license to use V4R. Licensees holding valid commercial V4R licenses may
** use this file in accordance with the commercial license agreement
** provided with the Software or, alternatively, in accordance with the
** terms contained in a written agreement between you and TU Wien, ACIN, V4R.
** For licensing terms and conditions please contact office<at>acin.tuwien.ac.at.
**
**
** The copyright holder additionally grants the author(s) of the file the right
** to use, copy, modify, merge, publish, distribute, sublicense, and/or
** sell copies of their contributions without any restrictions.
**
****************************************************************************/

/**
 * @file convertCloud.h
 * @author Johann Prankl (prankl@acin.tuwien.ac.at)
 * @date 2017
 * @brief Conversions between PCL point clouds, V4R data matrices, and Eigen
 *        matrices.
 *
 */

#ifndef KP_CONVERT_CLOUD_HPP
#define KP_CONVERT_CLOUD_HPP

#include <float.h>
#include <omp.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <v4r/common/PointTypes.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <v4r/common/impl/DataMatrix2D.hpp>

namespace v4r {

inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, DataMatrix2D<PointXYZRGB> &kp_cloud) {
  kp_cloud.resize(cloud.height, cloud.width);

  for (size_t i = 0; i < cloud.points.size(); i++) {
    const pcl::PointXYZRGB &pt = cloud.points[i];
    PointXYZRGB &kp = kp_cloud.data[i];

    kp.pt = pt.getVector4fMap();
    kp.rgb = pt.rgb;
  }
}

inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, DataMatrix2D<Eigen::Vector3f> &kp_cloud,
                         cv::Mat_<cv::Vec3b> &image) {
  kp_cloud.resize(cloud.height, cloud.width);
  image = cv::Mat_<cv::Vec3b>(cloud.height, cloud.width);

  for (size_t i = 0; i < cloud.points.size(); i++) {
    const pcl::PointXYZRGB &pt = cloud.points[i];
    kp_cloud.data[i] = pt.getVector3fMap();
    image(i) = cv::Vec3b(pt.b, pt.g, pt.r);
  }
}

inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, DataMatrix2D<Eigen::Vector3f> &kp_cloud) {
  kp_cloud.resize(cloud.height, cloud.width);

  for (size_t i = 0; i < cloud.points.size(); i++) {
    const pcl::PointXYZRGB &pt = cloud.points[i];
    kp_cloud.data[i] = pt.getVector3fMap();
  }
}

inline void convertCloud(const pcl::PointCloud<pcl::PointXYZ> &cloud, DataMatrix2D<Eigen::Vector3f> &kp_cloud) {
  kp_cloud.resize(cloud.height, cloud.width);

  for (size_t i = 0; i < cloud.points.size(); i++) {
    const pcl::PointXYZ &pt = cloud.points[i];
    kp_cloud.data[i] = pt.getVector3fMap();
  }
}

inline void
convertCloud(const DataMatrix2D<PointXYZRGB> &kp_cloud, pcl::PointCloud<pcl::PointXYZRGB> &pcl_cloud) { pcl_cloud.points.resize(kp_cloud.data.size()); pcl_cloud.width = kp_cloud.cols; pcl_cloud.height = kp_cloud.rows; pcl_cloud.is_dense = false; for (size_t i = 0; i < pcl_cloud.points.size(); i++) { const PointXYZRGB &kp = kp_cloud.data[i]; pcl::PointXYZRGB &pt = pcl_cloud.points[i]; pt.getVector4fMap() = kp.pt; pt.rgb = kp.rgb; } } inline void convertCloud(const DataMatrix2D<PointXYZ> &kp_cloud, pcl::PointCloud<pcl::PointXYZ> &pcl_cloud) { pcl_cloud.points.resize(kp_cloud.data.size()); pcl_cloud.width = kp_cloud.cols; pcl_cloud.height = kp_cloud.rows; pcl_cloud.is_dense = false; for (size_t i = 0; i < pcl_cloud.points.size(); i++) { const PointXYZ &kp = kp_cloud.data[i]; pcl::PointXYZ &pt = pcl_cloud.points[i]; pt.getVector3fMap() = kp.pt; } } // -------------- SAME WITH EIGEN MATRIX------------------- inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, Eigen::Matrix4Xf &matrix) { matrix = Eigen::Matrix4Xf(4, cloud.points.size()); #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < cloud.points.size(); i++) { const pcl::PointXYZRGB &pt = cloud.points[i]; matrix.col(i) = pt.getVector4fMap(); } } inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, Eigen::Matrix3Xf &matrix) { matrix = Eigen::Matrix3Xf(3, cloud.points.size()); #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < cloud.points.size(); i++) { const pcl::PointXYZRGB &pt = cloud.points[i]; matrix.col(i) = pt.getVector3fMap(); } } inline void convertCloud(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, const std::vector<int> &indices, Eigen::Matrix3Xf &matrix) { if (indices.empty()) throw std::runtime_error("Indices are empty!"); matrix = Eigen::Matrix3Xf(3, indices.size()); #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < indices.size(); i++) { const pcl::PointXYZRGB &pt = cloud.points[indices[i]]; matrix.col(i) = pt.getVector3fMap(); } } inline void convertCloud(const pcl::PointCloud<pcl::PointXYZ> &cloud, Eigen::Matrix3Xf &matrix) { matrix = Eigen::Matrix3Xf(3, cloud.points.size()); #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < cloud.points.size(); i++) { const pcl::PointXYZ &pt = cloud.points[i]; matrix.col(i) = pt.getVector3fMap(); } } inline void convertCloud(const pcl::PointCloud<pcl::PointXYZ> &cloud, const std::vector<int> &indices, Eigen::Matrix3Xf &matrix) { if (indices.empty()) throw std::runtime_error("Indices are empty!"); matrix = Eigen::Matrix3Xf(3, indices.size()); #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < indices.size(); i++) { const pcl::PointXYZ &pt = cloud.points[indices[i]]; matrix.col(i) = pt.getVector3fMap(); } } inline void convertCloud(const pcl::PointCloud<pcl::Normal> &normal, Eigen::Matrix3Xf &matrix) { matrix = Eigen::Matrix3Xf(3, normal.points.size()); #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < normal.points.size(); i++) { const pcl::Normal &pt = normal.points[i]; matrix.col(i) = pt.getNormalVector3fMap(); } } inline void convertCloud(const pcl::PointCloud<pcl::Normal> &normal, const std::vector<int> &indices, Eigen::Matrix3Xf &matrix) { if (indices.empty()) throw std::runtime_error("Indices are empty!"); matrix = Eigen::Matrix3Xf(3, indices.size()); #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < indices.size(); i++) { const pcl::Normal &pt = normal.points[indices[i]]; matrix.col(i) = pt.getNormalVector3fMap(); } } inline void 
convertCloud(const Eigen::Matrix4Xf &matrix, pcl::PointCloud<pcl::PointXYZRGB> &cloud) { cloud.points.resize(matrix.cols()); cloud.width = matrix.cols(); cloud.height = 1; cloud.is_dense = false; #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < cloud.points.size(); i++) { pcl::PointXYZRGB &pt = cloud.points[i]; pt.getVector4fMap() = matrix.col(i); } } inline void convertCloud(const Eigen::Matrix3Xf &matrix, pcl::PointCloud<pcl::PointXYZRGB> &cloud) { cloud.points.resize(matrix.cols()); cloud.width = matrix.cols(); cloud.height = 1; cloud.is_dense = false; #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < cloud.points.size(); i++) { pcl::PointXYZRGB &pt = cloud.points[i]; pt.getVector3fMap() = matrix.col(i); } } inline void convertCloud(const Eigen::Matrix3Xf &matrix, pcl::PointCloud<pcl::PointXYZ> &cloud) { cloud.points.resize(matrix.cols()); cloud.width = matrix.cols(); cloud.height = 1; cloud.is_dense = false; #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < cloud.points.size(); i++) { pcl::PointXYZ &pt = cloud.points[i]; pt.getVector3fMap() = matrix.col(i); } } inline void convertCloud(const Eigen::Matrix3Xf &matrix, pcl::PointCloud<pcl::Normal> &normals) { normals.points.resize(matrix.cols()); normals.width = matrix.cols(); normals.height = 1; normals.is_dense = false; #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < normals.points.size(); i++) { pcl::Normal &pt = normals.points[i]; pt.getNormalVector3fMap() = matrix.col(i); } } } // namespace v4r #endif
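
/*
  A minimal C-style sketch (not part of V4R; names are hypothetical) of the
  layout convention the Eigen overloads above rely on: n points are packed
  into a 3xN column-major matrix, column i holding point i, which is Eigen's
  default storage order for Matrix3Xf.
*/
#include <stddef.h>

typedef struct {
  float x, y, z;
} ExamplePointXYZ;

static void example_cloud_to_matrix3xn(const ExamplePointXYZ *pts, size_t n,
                                       float *m /* 3*n floats, column-major */) {
  for (size_t i = 0; i < n; i++) {
    m[3 * i + 0] = pts[i].x; /* column i, row 0 */
    m[3 * i + 1] = pts[i].y; /* column i, row 1 */
    m[3 * i + 2] = pts[i].z; /* column i, row 2 */
  }
}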
trmv_x_dia_n_hi.c
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num); for(int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } const ALPHA_INT diags = A->ndiag; #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; if(dis == 0) { const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < m; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][j], v, x[j]); } } else if(dis > 0) { const ALPHA_INT row_start = 0; const ALPHA_INT col_start = dis; const ALPHA_INT nnz = m - dis; const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]); } } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); for(ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { return ONAME_omp(alpha, A, x, beta, y); }
enm_cython.c
/* Generated by Cython 0.25.1 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_25_1" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x03030000 || (PY_MAJOR_VERSION == 2 && PY_VERSION_HEX >= 0x02070000) #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 
0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST))))) /* parenthesized so the flag mask, not the comparison, binds to & */ #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ?
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__enm_cython #define __PYX_HAVE_API__enm_cython #include <omp.h> #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define 
__Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); /* +1: room for the terminating NUL copied by strcpy() */ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static
PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "epidemic_network_modelling\\enm_cython.pyx", "stringsource", }; /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t 
*_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":275 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":326 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":951 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":326 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":951 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ 
PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static 
CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS 8 /* matches the 8-element shape/strides/suboffsets arrays in __Pyx_memviewslice */ #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* BufferIndexErrorNogil.proto */ static void __Pyx_RaiseBufferIndexErrorNogil(int axis); /* ForceInitThreads.proto */ #ifndef
__PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_EqObjC(op1, op2, intval, inplace)\ PyObject_RichCompare(op1, op2, Py_EQ) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_SubtractObjC(op1, op2, intval, inplace)\ (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) #endif /* SetItemInt.proto */ #define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, int wraparound, int boundscheck); /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* IterFinish.proto */ static CYTHON_INLINE int __Pyx_IterFinish(void); /* UnpackItemEndCheck.proto */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if 
CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo 
diminfo[8]; } __Pyx_LocalBuf_ND; /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_long(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static 
PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'openmp' */ /* Module declarations from 'enm_cython' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static double __pyx_f_10enm_cython_cfib(int); /*proto*/ static PyObject *__pyx_f_10enm_cython_c_array_f_multi(__Pyx_memviewslice); /*proto*/ static int __pyx_f_10enm_cython_c_sri_mc(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, double, double, double, int); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int 
__pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_long = { "long", NULL, sizeof(long), { 0 }, 0, IS_UNSIGNED(long) ? 'U' : 'I', IS_UNSIGNED(long), 0 }; #define __Pyx_MODULE_NAME "enm_cython" int __pyx_module_is_main_enm_cython = 0; /* Implementation of 'enm_cython' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_C[] = "C"; static const char __pyx_k_N[] = "N"; static const char __pyx_k_O[] = "O"; static const char __pyx_k_X[] = "X"; static const char __pyx_k_Y[] = "Y"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_x[] = "x"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_age[] = "age"; static const char __pyx_k_exp[] = "exp"; static const char __pyx_k_fib[] = "fib"; static const char __pyx_k_int[] = "int_"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_sum[] = "sum"; static const char __pyx_k_axis[] = "axis"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_math[] = "math"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_ones[] = "ones"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_array[] = "array"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_index[] = "index"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_order[] = "order"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_random[] = "random"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const 
char __pyx_k_vstack[] = "vstack"; static const char __pyx_k_array_f[] = "array_f"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_nonzero[] = "nonzero"; static const char __pyx_k_num_its[] = "num_its"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_c_array_f[] = "c_array_f"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_transpose[] = "transpose"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_enm_cython[] = "enm_cython"; static const char __pyx_k_num_people[] = "num_people"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_init_distrib[] = "init_distrib"; static const char __pyx_k_num_infected[] = "num_infected"; static const char __pyx_k_num_recovered[] = "num_recovered"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_random_sample[] = "random_sample"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_num_susceptible[] = "num_susceptible"; static const char __pyx_k_adjacency_matrix[] = "adjacency_matrix"; static const char __pyx_k_graph_attributes[] = "graph_attributes"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_recovery_probability[] = "recovery_probability"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_array_f_multi_wrapper[] = "array_f_multi_wrapper"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_cython_wrapper_sri_mc[] = "cython_wrapper_sri_mc"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_occupation_probability[] = "occupation_probability"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_transmission_probability[] = "transmission_probability"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_This_option_not_implemented_yet[] = "This option not implemented yet sorry!"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_C_Users_club084_Documents_Classe[] = "C:\\Users\\club084\\Documents\\Classes\\Fall 2016\\CHE_477\\repos\\epidemic_network_modelling\\epidemic_network_modelling\\enm_cython.pyx"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char 
__pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_n_s_C; static PyObject *__pyx_kp_s_C_Users_club084_Documents_Classe; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_s_N; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_kp_s_This_option_not_implemented_yet; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_X; static PyObject *__pyx_n_s_Y; static PyObject *__pyx_n_s_adjacency_matrix; static PyObject *__pyx_n_s_age; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_array; static PyObject *__pyx_n_s_array_f; static PyObject *__pyx_n_s_array_f_multi_wrapper; static PyObject *__pyx_n_s_axis; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_c_array_f; static PyObject *__pyx_n_s_class; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_cython_wrapper_sri_mc; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enm_cython; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_exp; static PyObject *__pyx_n_s_fib; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_graph_attributes; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_index; static PyObject *__pyx_n_s_init_distrib; static PyObject *__pyx_n_s_int; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_math; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_nonzero; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_num_infected; static 
PyObject *__pyx_n_s_num_its; static PyObject *__pyx_n_s_num_people; static PyObject *__pyx_n_s_num_recovered; static PyObject *__pyx_n_s_num_susceptible; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_occupation_probability; static PyObject *__pyx_n_s_ones; static PyObject *__pyx_n_s_order; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_random; static PyObject *__pyx_n_s_random_sample; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_recovery_probability; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_sum; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_transmission_probability; static PyObject *__pyx_n_s_transpose; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_vstack; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_10enm_cython_fib(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x); /* proto */ static PyObject *__pyx_pf_10enm_cython_2array_f(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_10enm_cython_4c_array_f(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X); /* proto */ static PyObject *__pyx_pf_10enm_cython_6array_f_multi_wrapper(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Y); /* proto */ static PyObject *__pyx_pf_10enm_cython_8cython_wrapper_sri_mc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_adjacency_matrix, PyObject *__pyx_v_age, PyObject *__pyx_v_transmission_probability, PyObject *__pyx_v_recovery_probability, PyObject *__pyx_v_occupation_probability, PyObject *__pyx_v_init_distrib, PyObject *__pyx_v_num_its); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject 
*__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_float_0_5; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_2; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__13; static PyObject *__pyx_slice__14; static PyObject *__pyx_slice__15; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__29; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__31; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; /* "enm_cython.pyx":8 * * * cdef double cfib(int n): # <<<<<<<<<<<<<< * cdef int i * cdef double a=0.0,b=1.0 */ static double __pyx_f_10enm_cython_cfib(int __pyx_v_n) { CYTHON_UNUSED int __pyx_v_i; double __pyx_v_a; double __pyx_v_b; double __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; double __pyx_t_3; double __pyx_t_4; __Pyx_RefNannySetupContext("cfib", 0); /* "enm_cython.pyx":10 * cdef double cfib(int n): * cdef int i * cdef double a=0.0,b=1.0 # <<<<<<<<<<<<<< * for i in range(n): * a, b = a+b, a */ __pyx_v_a = 0.0; __pyx_v_b = 1.0; /* "enm_cython.pyx":11 * cdef int i * cdef double a=0.0,b=1.0 * for i in range(n): # <<<<<<<<<<<<<< * a, b = a+b, a * return a */ __pyx_t_1 = __pyx_v_n; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "enm_cython.pyx":12 * cdef double a=0.0,b=1.0 * for i in range(n): * a, b = a+b, a # <<<<<<<<<<<<<< * return a * */ __pyx_t_3 = (__pyx_v_a + __pyx_v_b); __pyx_t_4 = __pyx_v_a; __pyx_v_a = __pyx_t_3; __pyx_v_b = __pyx_t_4; } /* "enm_cython.pyx":13 * for i in range(n): * a, b = a+b, a * return a # <<<<<<<<<<<<<< * * def fib(x): */ __pyx_r = __pyx_v_a; goto __pyx_L0; /* "enm_cython.pyx":8 * * * cdef double cfib(int n): # <<<<<<<<<<<<<< * cdef int i * cdef double a=0.0,b=1.0 */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "enm_cython.pyx":15 * return a * * def fib(x): # <<<<<<<<<<<<<< * return cfib(x) * */ /* Python wrapper */ static PyObject *__pyx_pw_10enm_cython_1fib(PyObject *__pyx_self, PyObject *__pyx_v_x); /*proto*/ static PyMethodDef __pyx_mdef_10enm_cython_1fib = {"fib", (PyCFunction)__pyx_pw_10enm_cython_1fib, METH_O, 0}; static PyObject *__pyx_pw_10enm_cython_1fib(PyObject *__pyx_self, PyObject *__pyx_v_x) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fib (wrapper)", 0); __pyx_r = __pyx_pf_10enm_cython_fib(__pyx_self, ((PyObject *)__pyx_v_x)); /* function exit code */ 
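/* Editorial note (added): cfib() above is Cython's compilation of

       cdef double cfib(int n):
           cdef int i
           cdef double a=0.0,b=1.0
           for i in range(n):
               a, b = a+b, a
           return a

   reconstructed verbatim from the embedded "enm_cython.pyx" source
   comments. Below is a minimal standalone C sketch of the same
   simultaneous assignment, kept inside this comment so the generated
   translation unit is unchanged; the names cfib_sketch and the demo
   main() are editorial, not part of this module:

       #include <stdio.h>

       static double cfib_sketch(int n)
       {
           double a = 0.0, b = 1.0;
           for (int i = 0; i < n; i++) {
               double next = a + b;  // evaluate the RHS of `a, b = a+b, a` first,
               b = a;                // then assign, exactly as the generated
               a = next;             // temporaries __pyx_t_3/__pyx_t_4 do above
           }
           return a;
       }

       int main(void)
       {
           printf("%g\n", cfib_sketch(10));  // prints 55
           return 0;
       }
*/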
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_10enm_cython_fib(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_x) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("fib", 0); /* "enm_cython.pyx":16 * * def fib(x): * return cfib(x) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_As_int(__pyx_v_x); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L1_error) __pyx_t_2 = PyFloat_FromDouble(__pyx_f_10enm_cython_cfib(__pyx_t_1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "enm_cython.pyx":15 * return a * * def fib(x): # <<<<<<<<<<<<<< * return cfib(x) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("enm_cython.fib", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "enm_cython.pyx":19 * * * def array_f(X): # <<<<<<<<<<<<<< * * Y = np.zeros(X.shape) */ /* Python wrapper */ static PyObject *__pyx_pw_10enm_cython_3array_f(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static PyMethodDef __pyx_mdef_10enm_cython_3array_f = {"array_f", (PyCFunction)__pyx_pw_10enm_cython_3array_f, METH_O, 0}; static PyObject *__pyx_pw_10enm_cython_3array_f(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_f (wrapper)", 0); __pyx_r = __pyx_pf_10enm_cython_2array_f(__pyx_self, ((PyObject *)__pyx_v_X)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_10enm_cython_2array_f(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_v_Y = NULL; PyObject *__pyx_v_index = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_f", 0); /* "enm_cython.pyx":21 * def array_f(X): * * Y = np.zeros(X.shape) # <<<<<<<<<<<<<< * index = X > 0.5 * Y[index] = np.exp(X[index]) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); 
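/* Descriptive note (added): this branch and its siblings are Cython's
   call dispatch for `np.zeros(X.shape)` (see the "enm_cython.pyx":21
   comment above): __Pyx_PyObject_CallOneArg when `zeros` was not
   fetched as a bound method, the PyFunction/PyCFunction FastCall paths
   when CYTHON_FAST_PYCALL/CYTHON_FAST_PYCCALL are enabled, and a
   generic fallback that packs the bound self (if any) plus the single
   argument into a fresh tuple for __Pyx_PyObject_Call. */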
__pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_Y = __pyx_t_1; __pyx_t_1 = 0; /* "enm_cython.pyx":22 * * Y = np.zeros(X.shape) * index = X > 0.5 # <<<<<<<<<<<<<< * Y[index] = np.exp(X[index]) * */ __pyx_t_1 = PyObject_RichCompare(__pyx_v_X, __pyx_float_0_5, Py_GT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __pyx_v_index = __pyx_t_1; __pyx_t_1 = 0; /* "enm_cython.pyx":23 * Y = np.zeros(X.shape) * index = X > 0.5 * Y[index] = np.exp(X[index]) # <<<<<<<<<<<<<< * * return Y */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_exp); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyObject_GetItem(__pyx_v_X, __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_3}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_3}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 23, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(PyObject_SetItem(__pyx_v_Y, __pyx_v_index, __pyx_t_1) < 0)) __PYX_ERR(0, 23, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":25 * Y[index] = np.exp(X[index]) * * return Y # <<<<<<<<<<<<<< * * def c_array_f(X): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_Y); __pyx_r = __pyx_v_Y; goto __pyx_L0; /* "enm_cython.pyx":19 * * * def array_f(X): # <<<<<<<<<<<<<< * * Y = np.zeros(X.shape) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("enm_cython.array_f", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_Y); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "enm_cython.pyx":27 * return Y * * def c_array_f(X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_10enm_cython_5c_array_f(PyObject *__pyx_self, PyObject *__pyx_v_X); /*proto*/ static PyMethodDef __pyx_mdef_10enm_cython_5c_array_f = {"c_array_f", (PyCFunction)__pyx_pw_10enm_cython_5c_array_f, METH_O, 0}; static PyObject *__pyx_pw_10enm_cython_5c_array_f(PyObject *__pyx_self, PyObject *__pyx_v_X) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("c_array_f (wrapper)", 0); __pyx_r = __pyx_pf_10enm_cython_4c_array_f(__pyx_self, ((PyObject *)__pyx_v_X)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_10enm_cython_4c_array_f(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_X) { int __pyx_v_N; __Pyx_memviewslice __pyx_v_Y = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_8; int __pyx_t_9; double __pyx_t_10; Py_ssize_t __pyx_t_11; int __pyx_t_12; Py_ssize_t __pyx_t_13; __Pyx_RefNannySetupContext("c_array_f", 0); /* "enm_cython.pyx":29 * def c_array_f(X): * * cdef int N = X.shape[0] # <<<<<<<<<<<<<< * cdef double[:] Y = np.zeros(N) * cdef int i */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_X, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 29, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_N = __pyx_t_3; /* "enm_cython.pyx":30 * * cdef int N = X.shape[0] * cdef double[:] Y = np.zeros(N) # <<<<<<<<<<<<<< * cdef int i * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_2); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v_Y = __pyx_t_7; __pyx_t_7.memview = NULL; __pyx_t_7.data = NULL; /* "enm_cython.pyx":33 * cdef int i * * for i in range(N): # <<<<<<<<<<<<<< * if X[i] > 0.5: * Y[i] = X[i] * 2 */ __pyx_t_3 = __pyx_v_N; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_3; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "enm_cython.pyx":34 * * for i in range(N): * if X[i] > 0.5: # <<<<<<<<<<<<<< * Y[i] = X[i] * 2 * else: */ __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_X, __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyObject_RichCompare(__pyx_t_2, __pyx_float_0_5, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 34, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_9) { /* "enm_cython.pyx":35 * for i in range(N): * if X[i] > 0.5: * Y[i] = X[i] * 2 # <<<<<<<<<<<<<< * else: * Y[i] = 0 */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_X, __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = PyNumber_Multiply(__pyx_t_4, __pyx_int_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_10 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_10 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); 
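/* Descriptive note (added): the store below is the bounds-checked
   memoryview write for `Y[i] = X[i] * 2`: a negative index wraps by
   adding Y.shape[0], anything still out of range raises IndexError via
   __Pyx_RaiseBufferIndexError, and the double is written through
   Y.data + i * Y.strides[0]. */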
__pyx_t_2 = 0; __pyx_t_11 = __pyx_v_i; __pyx_t_12 = -1; if (__pyx_t_11 < 0) { __pyx_t_11 += __pyx_v_Y.shape[0]; if (unlikely(__pyx_t_11 < 0)) __pyx_t_12 = 0; } else if (unlikely(__pyx_t_11 >= __pyx_v_Y.shape[0])) __pyx_t_12 = 0; if (unlikely(__pyx_t_12 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_12); __PYX_ERR(0, 35, __pyx_L1_error) } *((double *) ( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_11 * __pyx_v_Y.strides[0]) )) = __pyx_t_10; /* "enm_cython.pyx":34 * * for i in range(N): * if X[i] > 0.5: # <<<<<<<<<<<<<< * Y[i] = X[i] * 2 * else: */ goto __pyx_L5; } /* "enm_cython.pyx":37 * Y[i] = X[i] * 2 * else: * Y[i] = 0 # <<<<<<<<<<<<<< * * return Y */ /*else*/ { __pyx_t_13 = __pyx_v_i; __pyx_t_12 = -1; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_v_Y.shape[0]; if (unlikely(__pyx_t_13 < 0)) __pyx_t_12 = 0; } else if (unlikely(__pyx_t_13 >= __pyx_v_Y.shape[0])) __pyx_t_12 = 0; if (unlikely(__pyx_t_12 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_12); __PYX_ERR(0, 37, __pyx_L1_error) } *((double *) ( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_13 * __pyx_v_Y.strides[0]) )) = 0.0; } __pyx_L5:; } /* "enm_cython.pyx":39 * Y[i] = 0 * * return Y # <<<<<<<<<<<<<< * * cdef c_array_f_multi(long[:] X): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_Y, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "enm_cython.pyx":27 * return Y * * def c_array_f(X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1); __Pyx_AddTraceback("enm_cython.c_array_f", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_Y, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "enm_cython.pyx":41 * return Y * * cdef c_array_f_multi(long[:] X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ static PyObject *__pyx_f_10enm_cython_c_array_f_multi(__Pyx_memviewslice __pyx_v_X) { int __pyx_v_N; __Pyx_memviewslice __pyx_v_Y = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; CYTHON_UNUSED int __pyx_v_thread_num; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; Py_ssize_t __pyx_t_10; int __pyx_t_11; int __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; __Pyx_RefNannySetupContext("c_array_f_multi", 0); /* "enm_cython.pyx":43 * cdef c_array_f_multi(long[:] X): * * cdef int N = X.shape[0] # <<<<<<<<<<<<<< * cdef double[:] Y = np.zeros(N) * cdef int i */ __pyx_v_N = (__pyx_v_X.shape[0]); /* "enm_cython.pyx":44 * * cdef int N = X.shape[0] * cdef double[:] Y = np.zeros(N) # <<<<<<<<<<<<<< * cdef int i * */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_N); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 44, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_Y = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "enm_cython.pyx":48 * * cdef int num_threads * cdef int thread_num = openmp.omp_get_num_threads() # <<<<<<<<<<<<<< * * */ __pyx_v_thread_num = omp_get_num_threads(); /* "enm_cython.pyx":54 * * * for i in prange(N, nogil=True): # <<<<<<<<<<<<<< * if X[i] > 0.5: * Y[i] = X[i] * 2 */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_7 = __pyx_v_N; if (1 == 0) abort(); { int __pyx_parallel_temp0 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for 
firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8++){ if (__pyx_parallel_why < 2) { __pyx_v_i = (int)(0 + 1 * __pyx_t_8); /* "enm_cython.pyx":55 * * for i in prange(N, nogil=True): * if X[i] > 0.5: # <<<<<<<<<<<<<< * Y[i] = X[i] * 2 * else: */ __pyx_t_10 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_10 >= __pyx_v_X.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 55, __pyx_L8_error) } __pyx_t_12 = (((*((long *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_10 * __pyx_v_X.strides[0]) ))) > 0.5) != 0); if (__pyx_t_12) { /* "enm_cython.pyx":56 * for i in prange(N, nogil=True): * if X[i] > 0.5: * Y[i] = X[i] * 2 # <<<<<<<<<<<<<< * else: * Y[i] = 0 */ __pyx_t_13 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_13 >= __pyx_v_X.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 56, __pyx_L8_error) } __pyx_t_14 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_14 < 0) { __pyx_t_14 += __pyx_v_Y.shape[0]; if (unlikely(__pyx_t_14 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_14 >= __pyx_v_Y.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 56, __pyx_L8_error) } *((double *) ( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_14 * __pyx_v_Y.strides[0]) )) = ((*((long *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_13 * __pyx_v_X.strides[0]) ))) * 2); /* "enm_cython.pyx":55 * * for i in prange(N, nogil=True): * if X[i] > 0.5: # <<<<<<<<<<<<<< * Y[i] = X[i] * 2 * else: */ goto __pyx_L10; } /* "enm_cython.pyx":58 * Y[i] = X[i] * 2 * else: * Y[i] = 0 # <<<<<<<<<<<<<< * * return Y */ /*else*/ { __pyx_t_15 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_15 < 0) { __pyx_t_15 += __pyx_v_Y.shape[0]; if (unlikely(__pyx_t_15 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_15 >= __pyx_v_Y.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 58, __pyx_L8_error) } *((double *) ( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_15 * __pyx_v_Y.strides[0]) )) = 0.0; } __pyx_L10:; goto __pyx_L12; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L11; __pyx_L11:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_i; } __pyx_L12:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if 
(__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_i = __pyx_parallel_temp0; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "enm_cython.pyx":54 * * * for i in prange(N, nogil=True): # <<<<<<<<<<<<<< * if X[i] > 0.5: * Y[i] = X[i] * 2 */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "enm_cython.pyx":60 * Y[i] = 0 * * return Y # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_Y, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "enm_cython.pyx":41 * return Y * * cdef c_array_f_multi(long[:] X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("enm_cython.c_array_f_multi", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_Y, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "enm_cython.pyx":63 * * * def array_f_multi_wrapper(Y): # <<<<<<<<<<<<<< * X = Y * */ /* Python wrapper */ static PyObject *__pyx_pw_10enm_cython_7array_f_multi_wrapper(PyObject *__pyx_self, PyObject *__pyx_v_Y); /*proto*/ static PyMethodDef __pyx_mdef_10enm_cython_7array_f_multi_wrapper = {"array_f_multi_wrapper", (PyCFunction)__pyx_pw_10enm_cython_7array_f_multi_wrapper, METH_O, 0}; static PyObject *__pyx_pw_10enm_cython_7array_f_multi_wrapper(PyObject *__pyx_self, PyObject *__pyx_v_Y) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("array_f_multi_wrapper (wrapper)", 0); __pyx_r = __pyx_pf_10enm_cython_6array_f_multi_wrapper(__pyx_self, ((PyObject *)__pyx_v_Y)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_10enm_cython_6array_f_multi_wrapper(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_Y) { PyObject *__pyx_v_X = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("array_f_multi_wrapper", 0); /* "enm_cython.pyx":64 * * def array_f_multi_wrapper(Y): * X = Y # <<<<<<<<<<<<<< * * return c_array_f_multi(X) */ __Pyx_INCREF(__pyx_v_Y); __pyx_v_X = __pyx_v_Y; /* "enm_cython.pyx":66 * X = Y * * return 
c_array_f_multi(X) # <<<<<<<<<<<<<< * * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_to_MemoryviewSlice_ds_long(__pyx_v_X); if (unlikely(!__pyx_t_1.memview)) __PYX_ERR(0, 66, __pyx_L1_error) __pyx_t_2 = __pyx_f_10enm_cython_c_array_f_multi(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 66, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __pyx_t_1.memview = NULL; __pyx_t_1.data = NULL; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "enm_cython.pyx":63 * * * def array_f_multi_wrapper(Y): # <<<<<<<<<<<<<< * X = Y * */ /* function exit code */ __pyx_L1_error:; __PYX_XDEC_MEMVIEW(&__pyx_t_1, 1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("enm_cython.array_f_multi_wrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_X); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "enm_cython.pyx":68 * return c_array_f_multi(X) * * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): # <<<<<<<<<<<<<< * if init_distrib == 0: * num_people = len(adjacency_matrix) */ /* Python wrapper */ static PyObject *__pyx_pw_10enm_cython_9cython_wrapper_sri_mc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_10enm_cython_9cython_wrapper_sri_mc = {"cython_wrapper_sri_mc", (PyCFunction)__pyx_pw_10enm_cython_9cython_wrapper_sri_mc, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_10enm_cython_9cython_wrapper_sri_mc(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_adjacency_matrix = 0; PyObject *__pyx_v_age = 0; PyObject *__pyx_v_transmission_probability = 0; PyObject *__pyx_v_recovery_probability = 0; PyObject *__pyx_v_occupation_probability = 0; PyObject *__pyx_v_init_distrib = 0; PyObject *__pyx_v_num_its = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cython_wrapper_sri_mc (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_adjacency_matrix,&__pyx_n_s_age,&__pyx_n_s_transmission_probability,&__pyx_n_s_recovery_probability,&__pyx_n_s_occupation_probability,&__pyx_n_s_init_distrib,&__pyx_n_s_num_its,0}; PyObject* values[7] = {0,0,0,0,0,0,0}; values[5] = ((PyObject *)__pyx_int_0); values[6] = ((PyObject *)__pyx_int_0); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_adjacency_matrix)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_age)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("cython_wrapper_sri_mc", 0, 5, 7, 1); __PYX_ERR(0, 68, __pyx_L3_error) } case 2: if (likely((values[2] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_transmission_probability)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("cython_wrapper_sri_mc", 0, 5, 7, 2); __PYX_ERR(0, 68, __pyx_L3_error) } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_recovery_probability)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("cython_wrapper_sri_mc", 0, 5, 7, 3); __PYX_ERR(0, 68, __pyx_L3_error) } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_occupation_probability)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("cython_wrapper_sri_mc", 0, 5, 7, 4); __PYX_ERR(0, 68, __pyx_L3_error) } case 5: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_init_distrib); if (value) { values[5] = value; kw_args--; } } case 6: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_num_its); if (value) { values[6] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "cython_wrapper_sri_mc") < 0)) __PYX_ERR(0, 68, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_adjacency_matrix = values[0]; __pyx_v_age = values[1]; __pyx_v_transmission_probability = values[2]; __pyx_v_recovery_probability = values[3]; __pyx_v_occupation_probability = values[4]; __pyx_v_init_distrib = values[5]; __pyx_v_num_its = values[6]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cython_wrapper_sri_mc", 0, 5, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 68, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("enm_cython.cython_wrapper_sri_mc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_10enm_cython_8cython_wrapper_sri_mc(__pyx_self, __pyx_v_adjacency_matrix, __pyx_v_age, __pyx_v_transmission_probability, __pyx_v_recovery_probability, __pyx_v_occupation_probability, __pyx_v_init_distrib, __pyx_v_num_its); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_10enm_cython_8cython_wrapper_sri_mc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_adjacency_matrix, PyObject *__pyx_v_age, PyObject *__pyx_v_transmission_probability, PyObject *__pyx_v_recovery_probability, PyObject *__pyx_v_occupation_probability, PyObject *__pyx_v_init_distrib, PyObject *__pyx_v_num_its) { PyObject *__pyx_v_num_people = NULL; PyObject *__pyx_v_graph_attributes = NULL; PyObject *__pyx_v_num_susceptible = NULL; PyObject *__pyx_v_num_infected = NULL; CYTHON_UNUSED PyObject *__pyx_v_num_recovered = NULL; CYTHON_UNUSED PyObject *__pyx_v_i = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; PyObject *__pyx_t_13 = NULL; PyObject *(*__pyx_t_14)(PyObject *); 
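/* Descriptive note (added): pieced together from the embedded
   "enm_cython.pyx" comments around this function (indentation
   inferred, not authoritative), the source being compiled here begins:

       def cython_wrapper_sri_mc(adjacency_matrix, age,
                                 transmission_probability,
                                 recovery_probability,
                                 occupation_probability,
                                 init_distrib=0, num_its=0):
           if init_distrib == 0:
               num_people = len(adjacency_matrix)
               graph_attributes = np.transpose(np.vstack((np.vstack((
                   np.ones(num_people), np.zeros(num_people))),
                   np.zeros(num_people))))
               # first column is susceptible, second infected, third
               # resistant. Each row is a person

   The temporaries declared around this point (three __Pyx_memviewslice
   slots and the doubles __pyx_t_18..__pyx_t_20) presumably serve the
   rest of the function body, which falls outside this excerpt. */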
__Pyx_memviewslice __pyx_t_15 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_16 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_17 = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_t_18; double __pyx_t_19; double __pyx_t_20; int __pyx_t_21; __Pyx_RefNannySetupContext("cython_wrapper_sri_mc", 0); /* "enm_cython.pyx":69 * * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): * if init_distrib == 0: # <<<<<<<<<<<<<< * num_people = len(adjacency_matrix) * graph_attributes = np.transpose(np.vstack((np.vstack((np.ones(num_people),np.zeros(num_people))), np.zeros(num_people)))) */ __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_v_init_distrib, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 69, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_2) { /* "enm_cython.pyx":70 * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): * if init_distrib == 0: * num_people = len(adjacency_matrix) # <<<<<<<<<<<<<< * graph_attributes = np.transpose(np.vstack((np.vstack((np.ones(num_people),np.zeros(num_people))), np.zeros(num_people)))) * */ __pyx_t_3 = PyObject_Length(__pyx_v_adjacency_matrix); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(0, 70, __pyx_L1_error) __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_num_people = __pyx_t_1; __pyx_t_1 = 0; /* "enm_cython.pyx":71 * if init_distrib == 0: * num_people = len(adjacency_matrix) * graph_attributes = np.transpose(np.vstack((np.vstack((np.ones(num_people),np.zeros(num_people))), np.zeros(num_people)))) # <<<<<<<<<<<<<< * * #first column is susceptible, second infected, third resistant. 
Each row is a person */ __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_transpose); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_vstack); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_vstack); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_10 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_10, __pyx_n_s_ones); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) { __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_11); if (likely(__pyx_t_10)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); __Pyx_INCREF(__pyx_t_10); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_11, function); } } if (!__pyx_t_10) { __pyx_t_8 = __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_v_num_people); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_10, __pyx_v_num_people}; __pyx_t_8 = __Pyx_PyFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_8); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_11)) { PyObject *__pyx_temp[2] = {__pyx_t_10, __pyx_v_num_people}; __pyx_t_8 = __Pyx_PyCFunction_FastCall(__pyx_t_11, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_8); } else #endif { __pyx_t_12 = PyTuple_New(1+1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_10); __pyx_t_10 = NULL; __Pyx_INCREF(__pyx_v_num_people); __Pyx_GIVEREF(__pyx_v_num_people); PyTuple_SET_ITEM(__pyx_t_12, 0+1, __pyx_v_num_people); __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_11, __pyx_t_12, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; } } __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_12 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_12, __pyx_n_s_zeros); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_t_12 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_10))) { __pyx_t_12 = 
PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_12)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_12); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); } } if (!__pyx_t_12) { __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_v_num_people); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[2] = {__pyx_t_12, __pyx_v_num_people}; __pyx_t_11 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_GOTREF(__pyx_t_11); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[2] = {__pyx_t_12, __pyx_v_num_people}; __pyx_t_11 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_GOTREF(__pyx_t_11); } else #endif { __pyx_t_13 = PyTuple_New(1+1); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_GIVEREF(__pyx_t_12); PyTuple_SET_ITEM(__pyx_t_13, 0, __pyx_t_12); __pyx_t_12 = NULL; __Pyx_INCREF(__pyx_v_num_people); __Pyx_GIVEREF(__pyx_v_num_people); PyTuple_SET_ITEM(__pyx_t_13, 0+1, __pyx_v_num_people); __pyx_t_11 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_13, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; } } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_11); __pyx_t_8 = 0; __pyx_t_11 = 0; __pyx_t_11 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_11 = PyMethod_GET_SELF(__pyx_t_9); if (likely(__pyx_t_11)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_11); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_9, function); } } if (!__pyx_t_11) { __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_6); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_t_10}; __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_9)) { PyObject *__pyx_temp[2] = {__pyx_t_11, __pyx_t_10}; __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_9, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif { __pyx_t_8 = PyTuple_New(1+1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_11); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_11); __pyx_t_11 = NULL; __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_8, 0+1, __pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_8, NULL); 
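/* Descriptive note (added): on success __pyx_t_6 now holds the inner
   np.vstack((np.ones(num_people), np.zeros(num_people))) result; the
   code that follows builds the second np.zeros(num_people) column and
   dispatches the outer np.vstack (held in __pyx_t_7) on the pair,
   feeding np.transpose to produce graph_attributes. */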
if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } } __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_8 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_zeros); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_10))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_10); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_10, function); } } if (!__pyx_t_8) { __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_t_10, __pyx_v_num_people); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_v_num_people}; __pyx_t_9 = __Pyx_PyFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_10)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_v_num_people}; __pyx_t_9 = __Pyx_PyCFunction_FastCall(__pyx_t_10, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_9); } else #endif { __pyx_t_11 = PyTuple_New(1+1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_INCREF(__pyx_v_num_people); __Pyx_GIVEREF(__pyx_v_num_people); PyTuple_SET_ITEM(__pyx_t_11, 0+1, __pyx_v_num_people); __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_10, __pyx_t_11, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; } } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_6 = 0; __pyx_t_9 = 0; __pyx_t_9 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_9)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_9) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_GOTREF(__pyx_t_4); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_10}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_9, __pyx_t_10}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_7, 
__pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_9); __pyx_t_9 = NULL; __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_10); __pyx_t_10 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } if (!__pyx_t_7) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_7, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_7); __pyx_t_7 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_graph_attributes = __pyx_t_1; __pyx_t_1 = 0; /* "enm_cython.pyx":74 * * #first column is susceptible, second infected, third resistant. Each row is a person * num_susceptible = [0 for i in range(num_its+1)] #creating our return array. 
First value in array is the initial distribution # <<<<<<<<<<<<<< * num_infected = [0 for i in range(num_its+1)] * num_recovered = [0 for i in range(num_its+1)] */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyInt_AddObjC(__pyx_v_num_its, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_t_5)) || PyTuple_CheckExact(__pyx_t_5)) { __pyx_t_6 = __pyx_t_5; __Pyx_INCREF(__pyx_t_6); __pyx_t_3 = 0; __pyx_t_14 = NULL; } else { __pyx_t_3 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_14 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 74, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; for (;;) { if (likely(!__pyx_t_14)) { if (likely(PyList_CheckExact(__pyx_t_6))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 74, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 74, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_14(__pyx_t_6); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 74, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_5); __pyx_t_5 = 0; if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_int_0))) __PYX_ERR(0, 74, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_num_susceptible = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":75 * #first column is susceptible, second infected, third resistant. Each row is a person * num_susceptible = [0 for i in range(num_its+1)] #creating our return array. 
First value in array is the initial distribution * num_infected = [0 for i in range(num_its+1)] # <<<<<<<<<<<<<< * num_recovered = [0 for i in range(num_its+1)] * graph_attributes[0,0] = 0 */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyInt_AddObjC(__pyx_v_num_its, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_5, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (likely(PyList_CheckExact(__pyx_t_6)) || PyTuple_CheckExact(__pyx_t_6)) { __pyx_t_5 = __pyx_t_6; __Pyx_INCREF(__pyx_t_5); __pyx_t_3 = 0; __pyx_t_14 = NULL; } else { __pyx_t_3 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_14 = Py_TYPE(__pyx_t_5)->tp_iternext; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 75, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { if (likely(!__pyx_t_14)) { if (likely(PyList_CheckExact(__pyx_t_5))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 75, __pyx_L1_error) #else __pyx_t_6 = PySequence_ITEM(__pyx_t_5, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_3); __Pyx_INCREF(__pyx_t_6); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 75, __pyx_L1_error) #else __pyx_t_6 = PySequence_ITEM(__pyx_t_5, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 75, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); #endif } } else { __pyx_t_6 = __pyx_t_14(__pyx_t_5); if (unlikely(!__pyx_t_6)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 75, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_6); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_6); __pyx_t_6 = 0; if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_int_0))) __PYX_ERR(0, 75, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_num_infected = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":76 * num_susceptible = [0 for i in range(num_its+1)] #creating our return array. 
First value in array is the initial distribution * num_infected = [0 for i in range(num_its+1)] * num_recovered = [0 for i in range(num_its+1)] # <<<<<<<<<<<<<< * graph_attributes[0,0] = 0 * graph_attributes[0,1] = 1 */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyInt_AddObjC(__pyx_v_num_its, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_range, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_t_5)) || PyTuple_CheckExact(__pyx_t_5)) { __pyx_t_6 = __pyx_t_5; __Pyx_INCREF(__pyx_t_6); __pyx_t_3 = 0; __pyx_t_14 = NULL; } else { __pyx_t_3 = -1; __pyx_t_6 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_14 = Py_TYPE(__pyx_t_6)->tp_iternext; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 76, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; for (;;) { if (likely(!__pyx_t_14)) { if (likely(PyList_CheckExact(__pyx_t_6))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_6, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 76, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_6)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_6, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(0, 76, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_6, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_14(__pyx_t_6); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 76, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_5); __pyx_t_5 = 0; if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_int_0))) __PYX_ERR(0, 76, __pyx_L1_error) } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_num_recovered = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":77 * num_infected = [0 for i in range(num_its+1)] * num_recovered = [0 for i in range(num_its+1)] * graph_attributes[0,0] = 0 # <<<<<<<<<<<<<< * graph_attributes[0,1] = 1 * num_susceptible[0] = num_people - 1 */ if (unlikely(PyObject_SetItem(__pyx_v_graph_attributes, __pyx_tuple_, __pyx_int_0) < 0)) __PYX_ERR(0, 77, __pyx_L1_error) /* "enm_cython.pyx":78 * num_recovered = [0 for i in range(num_its+1)] * graph_attributes[0,0] = 0 * graph_attributes[0,1] = 1 # <<<<<<<<<<<<<< * num_susceptible[0] = num_people - 1 * num_infected[0] = 1 */ if (unlikely(PyObject_SetItem(__pyx_v_graph_attributes, __pyx_tuple__2, __pyx_int_1) < 0)) __PYX_ERR(0, 78, __pyx_L1_error) /* 
"enm_cython.pyx":79 * graph_attributes[0,0] = 0 * graph_attributes[0,1] = 1 * num_susceptible[0] = num_people - 1 # <<<<<<<<<<<<<< * num_infected[0] = 1 * */ __pyx_t_1 = __Pyx_PyInt_SubtractObjC(__pyx_v_num_people, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(__Pyx_SetItemInt(__pyx_v_num_susceptible, 0, __pyx_t_1, long, 1, __Pyx_PyInt_From_long, 1, 0, 1) < 0)) __PYX_ERR(0, 79, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":80 * graph_attributes[0,1] = 1 * num_susceptible[0] = num_people - 1 * num_infected[0] = 1 # <<<<<<<<<<<<<< * * else: */ if (unlikely(__Pyx_SetItemInt(__pyx_v_num_infected, 0, __pyx_int_1, long, 1, __Pyx_PyInt_From_long, 1, 0, 1) < 0)) __PYX_ERR(0, 80, __pyx_L1_error) /* "enm_cython.pyx":69 * * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): * if init_distrib == 0: # <<<<<<<<<<<<<< * num_people = len(adjacency_matrix) * graph_attributes = np.transpose(np.vstack((np.vstack((np.ones(num_people),np.zeros(num_people))), np.zeros(num_people)))) */ goto __pyx_L3; } /* "enm_cython.pyx":83 * * else: * raise ValueError('This option not implemented yet sorry!') #first column is susceptible, second infected, third resistant # <<<<<<<<<<<<<< * * graph_attributes = np.array(graph_attributes,order='C') */ /*else*/ { __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 83, __pyx_L1_error) } __pyx_L3:; /* "enm_cython.pyx":85 * raise ValueError('This option not implemented yet sorry!') #first column is susceptible, second infected, third resistant * * graph_attributes = np.array(graph_attributes,order='C') # <<<<<<<<<<<<<< * return c_sri_mc(adjacency_matrix, graph_attributes,age,transmission_probability,recovery_probability,occupation_probability,num_its) * # return susceptible, infected, recovered */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_array); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_graph_attributes); __Pyx_GIVEREF(__pyx_v_graph_attributes); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_graph_attributes); __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_order, __pyx_n_s_C) < 0) __PYX_ERR(0, 85, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 85, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_graph_attributes, __pyx_t_4); __pyx_t_4 = 0; /* "enm_cython.pyx":86 * * graph_attributes = np.array(graph_attributes,order='C') * return c_sri_mc(adjacency_matrix, graph_attributes,age,transmission_probability,recovery_probability,occupation_probability,num_its) # <<<<<<<<<<<<<< * # return susceptible, infected, recovered * */ 
__Pyx_XDECREF(__pyx_r); __pyx_t_15 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_adjacency_matrix); if (unlikely(!__pyx_t_15.memview)) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_t_16 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_v_graph_attributes); if (unlikely(!__pyx_t_16.memview)) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_t_17 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(__pyx_v_age); if (unlikely(!__pyx_t_17.memview)) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_t_18 = __pyx_PyFloat_AsDouble(__pyx_v_transmission_probability); if (unlikely((__pyx_t_18 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_t_19 = __pyx_PyFloat_AsDouble(__pyx_v_recovery_probability); if (unlikely((__pyx_t_19 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_t_20 = __pyx_PyFloat_AsDouble(__pyx_v_occupation_probability); if (unlikely((__pyx_t_20 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_t_21 = __Pyx_PyInt_As_int(__pyx_v_num_its); if (unlikely((__pyx_t_21 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error) __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_f_10enm_cython_c_sri_mc(__pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 86, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_15, 1); __pyx_t_15.memview = NULL; __pyx_t_15.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_16, 1); __pyx_t_16.memview = NULL; __pyx_t_16.data = NULL; __PYX_XDEC_MEMVIEW(&__pyx_t_17, 1); __pyx_t_17.memview = NULL; __pyx_t_17.data = NULL; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "enm_cython.pyx":68 * return c_array_f_multi(X) * * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): # <<<<<<<<<<<<<< * if init_distrib == 0: * num_people = len(adjacency_matrix) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_12); __Pyx_XDECREF(__pyx_t_13); __PYX_XDEC_MEMVIEW(&__pyx_t_15, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_16, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_17, 1); __Pyx_AddTraceback("enm_cython.cython_wrapper_sri_mc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_num_people); __Pyx_XDECREF(__pyx_v_graph_attributes); __Pyx_XDECREF(__pyx_v_num_susceptible); __Pyx_XDECREF(__pyx_v_num_infected); __Pyx_XDECREF(__pyx_v_num_recovered); __Pyx_XDECREF(__pyx_v_i); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "enm_cython.pyx":89 * # return susceptible, infected, recovered * * cdef int c_sri_mc(double[:, ::1] adj_mat, double[:,::1] graph_attributes, long[:, ::1] age, double transmission_probability, double recovery_probability, double occupation_probability, int num_its): # <<<<<<<<<<<<<< * * */ static int __pyx_f_10enm_cython_c_sri_mc(__Pyx_memviewslice __pyx_v_adj_mat, __Pyx_memviewslice __pyx_v_graph_attributes, CYTHON_UNUSED __Pyx_memviewslice __pyx_v_age, double __pyx_v_transmission_probability, double __pyx_v_recovery_probability, double __pyx_v_occupation_probability, int __pyx_v_num_its) { __Pyx_memviewslice __pyx_v_num_susceptible = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_num_infected = { 0, 0, { 0 }, { 0 }, { 0 } }; 
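/* Editor's note (hand-written, not Cython-generated): the declarations that
 * continue below are Cython's scratch state for c_sri_mc. The zero-filled
 * __Pyx_memviewslice initialisers correspond to the typed memoryview locals
 * declared in the .pyx (num_recovered, infections), and the long run of
 * Py_ssize_t temporaries (__pyx_t_7 .. __pyx_t_53) holds bounds-checked
 * indices for each subscripted memoryview access in the simulation loops.
 */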
__Pyx_memviewslice __pyx_v_num_recovered = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_n; int __pyx_v_i; int __pyx_v_j; long __pyx_v_num_possible_infections; __Pyx_memviewslice __pyx_v_infections = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_infection_probability; double __pyx_v_recovery_threshold; double __pyx_v_random_event; PyObject *__pyx_v_susceptible = NULL; PyObject *__pyx_v_infected = NULL; PyObject *__pyx_v_person = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_7; __Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; int __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; int __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; __Pyx_memviewslice __pyx_t_31 = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *(*__pyx_t_32)(PyObject *); Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; long __pyx_t_35; double __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; Py_ssize_t __pyx_t_42; Py_ssize_t __pyx_t_43; Py_ssize_t __pyx_t_44; PyObject *(*__pyx_t_45)(PyObject *); double __pyx_t_46; double __pyx_t_47; Py_ssize_t __pyx_t_48; Py_ssize_t __pyx_t_49; Py_ssize_t __pyx_t_50; Py_ssize_t __pyx_t_51; Py_ssize_t __pyx_t_52; Py_ssize_t __pyx_t_53; __Pyx_RefNannySetupContext("c_sri_mc", 0); /* "enm_cython.pyx":92 * * * cdef double[:,::1] num_susceptible = np.zeros((num_its+1,1)) # <<<<<<<<<<<<<< * cdef double[:,::1] num_infected = np.zeros((num_its + 1,1)) * cdef double[:,::1] num_recovered = np.zeros((num_its+1,1)) */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_long((__pyx_v_num_its + 1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_1); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 92, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_num_susceptible = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "enm_cython.pyx":93 * * cdef double[:,::1] num_susceptible = np.zeros((num_its+1,1)) * cdef double[:,::1] num_infected = np.zeros((num_its + 1,1)) # <<<<<<<<<<<<<< * cdef double[:,::1] num_recovered = np.zeros((num_its+1,1)) * cdef int n, i, j */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyInt_From_long((__pyx_v_num_its + 1)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_1); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } if (!__pyx_t_3) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_1); 
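/* Editor's note (hand-written, not Cython-generated): this if/else ladder
 * is Cython's standard call-dispatch pattern and repeats for every
 * np.zeros(...) call in this function: first unpack a bound method so self
 * can be passed positionally, then prefer the PyFunction / PyCFunction
 * fast-call paths when the interpreter exposes them (CYTHON_FAST_PYCALL /
 * CYTHON_FAST_PYCCALL), and otherwise fall back to building an argument
 * tuple for a generic __Pyx_PyObject_Call.
 */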
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 93, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_num_infected = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "enm_cython.pyx":94 * cdef double[:,::1] num_susceptible = np.zeros((num_its+1,1)) * cdef double[:,::1] num_infected = np.zeros((num_its + 1,1)) * cdef double[:,::1] num_recovered = np.zeros((num_its+1,1)) # <<<<<<<<<<<<<< * cdef int n, i, j * cdef long num_possible_infections */ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyInt_From_long((__pyx_v_num_its + 1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_1); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_5) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error) 
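/* Editor's note (hand-written, not Cython-generated): a likely latent bug
 * in the source is worth flagging near here. At "enm_cython.pyx":97,
 * `infections` is allocated with dtype=np.int_ to satisfy its long[:, ::1]
 * declaration, but the reassignment inside the iteration loop at line 101
 * calls np.zeros((len(adj_mat), 1)) with no dtype, which produces a float64
 * array. Assigning that buffer to a long memoryview should fail at runtime
 * with "ValueError: Buffer dtype mismatch"; the in-loop allocation
 * presumably needs the same dtype=np.int_ argument as line 97.
 */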
__Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 94, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_num_recovered = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "enm_cython.pyx":97 * cdef int n, i, j * cdef long num_possible_infections * cdef long[:,::1] infections = np.zeros((len(adj_mat),1),dtype=np.int_) # <<<<<<<<<<<<<< * cdef double infection_probability, recovery_threshold, random_event * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_adj_mat, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = PyObject_Length(__pyx_t_1); if (unlikely(__pyx_t_7 == -1)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(__pyx_t_5); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 97, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_infections = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "enm_cython.pyx":100 * cdef double infection_probability, recovery_threshold, random_event * * for n in range(num_its): # <<<<<<<<<<<<<< * infections = np.zeros((len(adj_mat),1)) #number of infection events each person experiences * for i in range(len(graph_attributes)): */ __pyx_t_9 = __pyx_v_num_its; for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { __pyx_v_n = __pyx_t_10; /* "enm_cython.pyx":101 * * for n in range(num_its): * infections = np.zeros((len(adj_mat),1)) #number of 
infection events each person experiences # <<<<<<<<<<<<<< * for i in range(len(graph_attributes)): * for j in range(len(graph_attributes)): #iterating over every edge in graph */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_adj_mat, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = PyObject_Length(__pyx_t_3); if (unlikely(__pyx_t_7 == -1)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyInt_FromSsize_t(__pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __Pyx_INCREF(__pyx_int_1); __Pyx_GIVEREF(__pyx_int_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_1); __pyx_t_3 = 0; __pyx_t_3 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (!__pyx_t_3) { __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_5); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_2}; __pyx_t_5 = __Pyx_PyFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_1)) { PyObject *__pyx_temp[2] = {__pyx_t_3, __pyx_t_2}; __pyx_t_5 = __Pyx_PyCFunction_FastCall(__pyx_t_1, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_8 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(__pyx_t_5); if (unlikely(!__pyx_t_8.memview)) __PYX_ERR(0, 101, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_XDEC_MEMVIEW(&__pyx_v_infections, 1); __pyx_v_infections = __pyx_t_8; __pyx_t_8.memview = NULL; __pyx_t_8.data = NULL; /* "enm_cython.pyx":102 * for n in range(num_its): * infections = np.zeros((len(adj_mat),1)) #number of infection events each person experiences * for i in 
range(len(graph_attributes)): # <<<<<<<<<<<<<< * for j in range(len(graph_attributes)): #iterating over every edge in graph * */ __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_graph_attributes, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = PyObject_Length(__pyx_t_5); if (unlikely(__pyx_t_7 == -1)) __PYX_ERR(0, 102, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_7; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "enm_cython.pyx":103 * infections = np.zeros((len(adj_mat),1)) #number of infection events each person experiences * for i in range(len(graph_attributes)): * for j in range(len(graph_attributes)): #iterating over every edge in graph # <<<<<<<<<<<<<< * * if i > j: #only counting the lower triangle of the symmetric matrix */ __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_graph_attributes, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_12 = PyObject_Length(__pyx_t_5); if (unlikely(__pyx_t_12 == -1)) __PYX_ERR(0, 103, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_j = __pyx_t_13; /* "enm_cython.pyx":105 * for j in range(len(graph_attributes)): #iterating over every edge in graph * * if i > j: #only counting the lower triangle of the symmetric matrix # <<<<<<<<<<<<<< * * if adj_mat[i,j] != 0: #only connected people */ __pyx_t_14 = ((__pyx_v_i > __pyx_v_j) != 0); if (__pyx_t_14) { /* "enm_cython.pyx":107 * if i > j: #only counting the lower triangle of the symmetric matrix * * if adj_mat[i,j] != 0: #only connected people # <<<<<<<<<<<<<< * * if graph_attributes[i,0] == 1 and graph_attributes[j,1] == 1: */ __pyx_t_15 = __pyx_v_i; __pyx_t_16 = __pyx_v_j; __pyx_t_17 = -1; if (__pyx_t_15 < 0) { __pyx_t_15 += __pyx_v_adj_mat.shape[0]; if (unlikely(__pyx_t_15 < 0)) __pyx_t_17 = 0; } else if (unlikely(__pyx_t_15 >= __pyx_v_adj_mat.shape[0])) __pyx_t_17 = 0; if (__pyx_t_16 < 0) { __pyx_t_16 += __pyx_v_adj_mat.shape[1]; if (unlikely(__pyx_t_16 < 0)) __pyx_t_17 = 1; } else if (unlikely(__pyx_t_16 >= __pyx_v_adj_mat.shape[1])) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 107, __pyx_L1_error) } __pyx_t_14 = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_adj_mat.data + __pyx_t_15 * __pyx_v_adj_mat.strides[0]) )) + __pyx_t_16)) ))) != 0.0) != 0); if (__pyx_t_14) { /* "enm_cython.pyx":109 * if adj_mat[i,j] != 0: #only connected people * * if graph_attributes[i,0] == 1 and graph_attributes[j,1] == 1: # <<<<<<<<<<<<<< * infections[i,0] += 1 * #only counting events b/w an infected and susceptible person */ __pyx_t_18 = __pyx_v_i; __pyx_t_19 = 0; __pyx_t_17 = -1; if (__pyx_t_18 < 0) { __pyx_t_18 += __pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_18 < 0)) __pyx_t_17 = 0; } else if (unlikely(__pyx_t_18 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_17 = 0; if (__pyx_t_19 < 0) { __pyx_t_19 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_19 < 0)) __pyx_t_17 = 1; } else if (unlikely(__pyx_t_19 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 109, __pyx_L1_error) } 
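/* Editor's note (hand-written, not Cython-generated): the bounds-checked
 * code around here expands the opportunity-counting loop of
 * "enm_cython.pyx":100-113 and the per-person infection trials that follow
 * (lines 115-127). Stripped of the generated index checks, one Monte Carlo
 * step is simply (with `state` standing for graph_attributes):
 *
 *     for i in range(n):
 *         for j in range(i):                 # lower triangle: each edge once
 *             if adj_mat[i, j] != 0:         # i and j are in contact
 *                 if state[i, 0] == 1 and state[j, 1] == 1:
 *                     infections[i, 0] += 1  # susceptible i met infected j
 *                 elif state[i, 1] == 1 and state[j, 0] == 1:
 *                     infections[j, 0] += 1  # susceptible j met infected i
 *     for person in state[:, 0].nonzero()[0]:
 *         p = transmission_probability * occupation_probability
 *         for _ in range(infections[person, 0]):
 *             if p > np.random.random_sample():
 *                 state[person, 0], state[person, 1] = 0, 1
 *                 break                      # one infection is enough
 *
 * (The source iterates j over the full range and filters with `if i > j`,
 * which is equivalent to range(i) but does twice the loop work.)
 */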
__pyx_t_20 = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_18 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_19)) ))) == 1.0) != 0); if (__pyx_t_20) { } else { __pyx_t_14 = __pyx_t_20; goto __pyx_L12_bool_binop_done; } __pyx_t_21 = __pyx_v_j; __pyx_t_22 = 1; __pyx_t_17 = -1; if (__pyx_t_21 < 0) { __pyx_t_21 += __pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_21 < 0)) __pyx_t_17 = 0; } else if (unlikely(__pyx_t_21 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_17 = 0; if (__pyx_t_22 < 0) { __pyx_t_22 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_22 < 0)) __pyx_t_17 = 1; } else if (unlikely(__pyx_t_22 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 109, __pyx_L1_error) } __pyx_t_20 = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_21 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_22)) ))) == 1.0) != 0); __pyx_t_14 = __pyx_t_20; __pyx_L12_bool_binop_done:; if (__pyx_t_14) { /* "enm_cython.pyx":110 * * if graph_attributes[i,0] == 1 and graph_attributes[j,1] == 1: * infections[i,0] += 1 # <<<<<<<<<<<<<< * #only counting events b/w an infected and susceptible person * elif graph_attributes[i,1] == 1 and graph_attributes[j,0] == 1: */ __pyx_t_23 = __pyx_v_i; __pyx_t_24 = 0; __pyx_t_17 = -1; if (__pyx_t_23 < 0) { __pyx_t_23 += __pyx_v_infections.shape[0]; if (unlikely(__pyx_t_23 < 0)) __pyx_t_17 = 0; } else if (unlikely(__pyx_t_23 >= __pyx_v_infections.shape[0])) __pyx_t_17 = 0; if (__pyx_t_24 < 0) { __pyx_t_24 += __pyx_v_infections.shape[1]; if (unlikely(__pyx_t_24 < 0)) __pyx_t_17 = 1; } else if (unlikely(__pyx_t_24 >= __pyx_v_infections.shape[1])) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 110, __pyx_L1_error) } *((long *) ( /* dim=1 */ ((char *) (((long *) ( /* dim=0 */ (__pyx_v_infections.data + __pyx_t_23 * __pyx_v_infections.strides[0]) )) + __pyx_t_24)) )) += 1; /* "enm_cython.pyx":109 * if adj_mat[i,j] != 0: #only connected people * * if graph_attributes[i,0] == 1 and graph_attributes[j,1] == 1: # <<<<<<<<<<<<<< * infections[i,0] += 1 * #only counting events b/w an infected and susceptible person */ goto __pyx_L11; } /* "enm_cython.pyx":112 * infections[i,0] += 1 * #only counting events b/w an infected and susceptible person * elif graph_attributes[i,1] == 1 and graph_attributes[j,0] == 1: # <<<<<<<<<<<<<< * infections[j,0] += 1 * */ __pyx_t_25 = __pyx_v_i; __pyx_t_26 = 1; __pyx_t_17 = -1; if (__pyx_t_25 < 0) { __pyx_t_25 += __pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_25 < 0)) __pyx_t_17 = 0; } else if (unlikely(__pyx_t_25 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_17 = 0; if (__pyx_t_26 < 0) { __pyx_t_26 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_26 < 0)) __pyx_t_17 = 1; } else if (unlikely(__pyx_t_26 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 112, __pyx_L1_error) } __pyx_t_20 = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_25 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_26)) ))) == 1.0) != 0); if (__pyx_t_20) { } else { __pyx_t_14 = __pyx_t_20; goto __pyx_L14_bool_binop_done; } __pyx_t_27 = __pyx_v_j; __pyx_t_28 = 0; __pyx_t_17 = -1; if (__pyx_t_27 < 0) { __pyx_t_27 += 
__pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_27 < 0)) __pyx_t_17 = 0; } else if (unlikely(__pyx_t_27 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_17 = 0; if (__pyx_t_28 < 0) { __pyx_t_28 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_28 < 0)) __pyx_t_17 = 1; } else if (unlikely(__pyx_t_28 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 112, __pyx_L1_error) } __pyx_t_20 = (((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_27 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_28)) ))) == 1.0) != 0); __pyx_t_14 = __pyx_t_20; __pyx_L14_bool_binop_done:; if (__pyx_t_14) { /* "enm_cython.pyx":113 * #only counting events b/w an infected and susceptible person * elif graph_attributes[i,1] == 1 and graph_attributes[j,0] == 1: * infections[j,0] += 1 # <<<<<<<<<<<<<< * * susceptible = graph_attributes[:,0].nonzero()[0] */ __pyx_t_29 = __pyx_v_j; __pyx_t_30 = 0; __pyx_t_17 = -1; if (__pyx_t_29 < 0) { __pyx_t_29 += __pyx_v_infections.shape[0]; if (unlikely(__pyx_t_29 < 0)) __pyx_t_17 = 0; } else if (unlikely(__pyx_t_29 >= __pyx_v_infections.shape[0])) __pyx_t_17 = 0; if (__pyx_t_30 < 0) { __pyx_t_30 += __pyx_v_infections.shape[1]; if (unlikely(__pyx_t_30 < 0)) __pyx_t_17 = 1; } else if (unlikely(__pyx_t_30 >= __pyx_v_infections.shape[1])) __pyx_t_17 = 1; if (unlikely(__pyx_t_17 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_17); __PYX_ERR(0, 113, __pyx_L1_error) } *((long *) ( /* dim=1 */ ((char *) (((long *) ( /* dim=0 */ (__pyx_v_infections.data + __pyx_t_29 * __pyx_v_infections.strides[0]) )) + __pyx_t_30)) )) += 1; /* "enm_cython.pyx":112 * infections[i,0] += 1 * #only counting events b/w an infected and susceptible person * elif graph_attributes[i,1] == 1 and graph_attributes[j,0] == 1: # <<<<<<<<<<<<<< * infections[j,0] += 1 * */ } __pyx_L11:; /* "enm_cython.pyx":107 * if i > j: #only counting the lower triangle of the symmetric matrix * * if adj_mat[i,j] != 0: #only connected people # <<<<<<<<<<<<<< * * if graph_attributes[i,0] == 1 and graph_attributes[j,1] == 1: */ } /* "enm_cython.pyx":105 * for j in range(len(graph_attributes)): #iterating over every edge in graph * * if i > j: #only counting the lower triangle of the symmetric matrix # <<<<<<<<<<<<<< * * if adj_mat[i,j] != 0: #only connected people */ } } } /* "enm_cython.pyx":115 * infections[j,0] += 1 * * susceptible = graph_attributes[:,0].nonzero()[0] # <<<<<<<<<<<<<< * * infected = graph_attributes[:,1].nonzero()[0] */ __pyx_t_31.data = __pyx_v_graph_attributes.data; __pyx_t_31.memview = __pyx_v_graph_attributes.memview; __PYX_INC_MEMVIEW(&__pyx_t_31, 0); __pyx_t_31.shape[0] = __pyx_v_graph_attributes.shape[0]; __pyx_t_31.strides[0] = __pyx_v_graph_attributes.strides[0]; __pyx_t_31.suboffsets[0] = -1; { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_graph_attributes.shape[1]; Py_ssize_t __pyx_tmp_stride = __pyx_v_graph_attributes.strides[1]; if (1 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (1 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 1)"); __PYX_ERR(0, 115, __pyx_L1_error) } __pyx_t_31.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_t_31, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 115, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __PYX_XDEC_MEMVIEW(&__pyx_t_31, 1); __pyx_t_31.memview = NULL; __pyx_t_31.data = NULL; __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_nonzero); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_1)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (__pyx_t_1) { __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 115, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_5, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF_SET(__pyx_v_susceptible, __pyx_t_4); __pyx_t_4 = 0; /* "enm_cython.pyx":117 * susceptible = graph_attributes[:,0].nonzero()[0] * * infected = graph_attributes[:,1].nonzero()[0] # <<<<<<<<<<<<<< * for person in susceptible: #for every susceptible person * num_possible_infections = infections[person,0] */ __pyx_t_31.data = __pyx_v_graph_attributes.data; __pyx_t_31.memview = __pyx_v_graph_attributes.memview; __PYX_INC_MEMVIEW(&__pyx_t_31, 0); __pyx_t_31.shape[0] = __pyx_v_graph_attributes.shape[0]; __pyx_t_31.strides[0] = __pyx_v_graph_attributes.strides[0]; __pyx_t_31.suboffsets[0] = -1; { Py_ssize_t __pyx_tmp_idx = 1; Py_ssize_t __pyx_tmp_shape = __pyx_v_graph_attributes.shape[1]; Py_ssize_t __pyx_tmp_stride = __pyx_v_graph_attributes.strides[1]; if (1 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (1 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 1)"); __PYX_ERR(0, 117, __pyx_L1_error) } __pyx_t_31.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_t_31, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_31, 1); __pyx_t_31.memview = NULL; __pyx_t_31.data = NULL; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_nonzero); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (__pyx_t_5) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } else { __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 117, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_4, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF_SET(__pyx_v_infected, __pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":118 * * infected = graph_attributes[:,1].nonzero()[0] * for person in susceptible: #for every susceptible person # <<<<<<<<<<<<<< * num_possible_infections = infections[person,0] * if num_possible_infections > 0: #if they had at least one opportunity to be infected */ if (likely(PyList_CheckExact(__pyx_v_susceptible)) || PyTuple_CheckExact(__pyx_v_susceptible)) { __pyx_t_1 = __pyx_v_susceptible; __Pyx_INCREF(__pyx_t_1); __pyx_t_7 = 0; __pyx_t_32 = NULL; } else { __pyx_t_7 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_susceptible); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_32 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_32)) __PYX_ERR(0, 118, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_32)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_4); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 118, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_4); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 118, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 118, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } } else { __pyx_t_4 = __pyx_t_32(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 118, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_4); } __Pyx_XDECREF_SET(__pyx_v_person, __pyx_t_4); __pyx_t_4 = 0; /* "enm_cython.pyx":119 * infected = graph_attributes[:,1].nonzero()[0] * for person in susceptible: #for every susceptible person * num_possible_infections = infections[person,0] # <<<<<<<<<<<<<< * if num_possible_infections > 0: #if they had at least one opportunity to be infected * infection_probability = transmission_probability * occupation_probability #their overall infection probability */ __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_v_person); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L1_error) __pyx_t_33 = __pyx_t_12; __pyx_t_34 = 0; __pyx_t_11 = -1; if (__pyx_t_33 < 0) { __pyx_t_33 += __pyx_v_infections.shape[0]; if (unlikely(__pyx_t_33 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_33 >= __pyx_v_infections.shape[0])) __pyx_t_11 = 0; if (__pyx_t_34 < 0) { __pyx_t_34 += __pyx_v_infections.shape[1]; if (unlikely(__pyx_t_34 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_34 >= __pyx_v_infections.shape[1])) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 119, __pyx_L1_error) } __pyx_v_num_possible_infections = (*((long *) ( /* dim=1 */ ((char *) (((long *) ( /* dim=0 */ (__pyx_v_infections.data + __pyx_t_33 * __pyx_v_infections.strides[0]) )) + __pyx_t_34)) ))); /* 
"enm_cython.pyx":120 * for person in susceptible: #for every susceptible person * num_possible_infections = infections[person,0] * if num_possible_infections > 0: #if they had at least one opportunity to be infected # <<<<<<<<<<<<<< * infection_probability = transmission_probability * occupation_probability #their overall infection probability * for i in range(num_possible_infections): */ __pyx_t_14 = ((__pyx_v_num_possible_infections > 0) != 0); if (__pyx_t_14) { /* "enm_cython.pyx":121 * num_possible_infections = infections[person,0] * if num_possible_infections > 0: #if they had at least one opportunity to be infected * infection_probability = transmission_probability * occupation_probability #their overall infection probability # <<<<<<<<<<<<<< * for i in range(num_possible_infections): * random_event = np.random.random_sample() */ __pyx_v_infection_probability = (__pyx_v_transmission_probability * __pyx_v_occupation_probability); /* "enm_cython.pyx":122 * if num_possible_infections > 0: #if they had at least one opportunity to be infected * infection_probability = transmission_probability * occupation_probability #their overall infection probability * for i in range(num_possible_infections): # <<<<<<<<<<<<<< * random_event = np.random.random_sample() * if infection_probability > random_event: #anything below infection probability considered an infection */ __pyx_t_35 = __pyx_v_num_possible_infections; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_35; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "enm_cython.pyx":123 * infection_probability = transmission_probability * occupation_probability #their overall infection probability * for i in range(num_possible_infections): * random_event = np.random.random_sample() # <<<<<<<<<<<<<< * if infection_probability > random_event: #anything below infection probability considered an infection * graph_attributes[person,0] = 0 */ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_random_sample); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } if (__pyx_t_2) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 123, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_36 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_36 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 123, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_random_event = __pyx_t_36; /* "enm_cython.pyx":124 * for i in range(num_possible_infections): * random_event = np.random.random_sample() * if infection_probability > random_event: #anything below infection probability considered an infection # <<<<<<<<<<<<<< * 
graph_attributes[person,0] = 0 * graph_attributes[person,1] = 1 */ __pyx_t_14 = ((__pyx_v_infection_probability > __pyx_v_random_event) != 0); if (__pyx_t_14) { /* "enm_cython.pyx":125 * random_event = np.random.random_sample() * if infection_probability > random_event: #anything below infection probability considered an infection * graph_attributes[person,0] = 0 # <<<<<<<<<<<<<< * graph_attributes[person,1] = 1 * break */ __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_v_person); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 125, __pyx_L1_error) __pyx_t_37 = __pyx_t_12; __pyx_t_38 = 0; __pyx_t_13 = -1; if (__pyx_t_37 < 0) { __pyx_t_37 += __pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_37 < 0)) __pyx_t_13 = 0; } else if (unlikely(__pyx_t_37 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_13 = 0; if (__pyx_t_38 < 0) { __pyx_t_38 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_38 < 0)) __pyx_t_13 = 1; } else if (unlikely(__pyx_t_38 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_13 = 1; if (unlikely(__pyx_t_13 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_13); __PYX_ERR(0, 125, __pyx_L1_error) } *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_37 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_38)) )) = 0.0; /* "enm_cython.pyx":126 * if infection_probability > random_event: #anything below infection probability considered an infection * graph_attributes[person,0] = 0 * graph_attributes[person,1] = 1 # <<<<<<<<<<<<<< * break * */ __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_v_person); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 126, __pyx_L1_error) __pyx_t_39 = __pyx_t_12; __pyx_t_40 = 1; __pyx_t_13 = -1; if (__pyx_t_39 < 0) { __pyx_t_39 += __pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_39 < 0)) __pyx_t_13 = 0; } else if (unlikely(__pyx_t_39 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_13 = 0; if (__pyx_t_40 < 0) { __pyx_t_40 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_40 < 0)) __pyx_t_13 = 1; } else if (unlikely(__pyx_t_40 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_13 = 1; if (unlikely(__pyx_t_13 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_13); __PYX_ERR(0, 126, __pyx_L1_error) } *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_39 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_40)) )) = 1.0; /* "enm_cython.pyx":127 * graph_attributes[person,0] = 0 * graph_attributes[person,1] = 1 * break # <<<<<<<<<<<<<< * * */ goto __pyx_L20_break; /* "enm_cython.pyx":124 * for i in range(num_possible_infections): * random_event = np.random.random_sample() * if infection_probability > random_event: #anything below infection probability considered an infection # <<<<<<<<<<<<<< * graph_attributes[person,0] = 0 * graph_attributes[person,1] = 1 */ } } __pyx_L20_break:; /* "enm_cython.pyx":120 * for person in susceptible: #for every susceptible person * num_possible_infections = infections[person,0] * if num_possible_infections > 0: #if they had at least one opportunity to be infected # <<<<<<<<<<<<<< * infection_probability = transmission_probability * occupation_probability #their overall infection probability * for i in range(num_possible_infections): */ } /* "enm_cython.pyx":118 * * infected = graph_attributes[:,1].nonzero()[0] * for person in susceptible: #for every susceptible person # <<<<<<<<<<<<<< * num_possible_infections = infections[person,0] * if num_possible_infections > 
0: #if they had at least one opportunity to be infected */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":130 * * * for person in infected: #for every infected person # <<<<<<<<<<<<<< * recovery_threshold = np.random.random_sample() * if recovery_probability > recovery_threshold: #anything below recovery probability considered a recovery */ if (likely(PyList_CheckExact(__pyx_v_infected)) || PyTuple_CheckExact(__pyx_v_infected)) { __pyx_t_1 = __pyx_v_infected; __Pyx_INCREF(__pyx_t_1); __pyx_t_7 = 0; __pyx_t_32 = NULL; } else { __pyx_t_7 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_infected); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_32 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_32)) __PYX_ERR(0, 130, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_32)) { if (likely(PyList_CheckExact(__pyx_t_1))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_4); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 130, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_4); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 130, __pyx_L1_error) #else __pyx_t_4 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 130, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } } else { __pyx_t_4 = __pyx_t_32(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(0, 130, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_4); } __Pyx_XDECREF_SET(__pyx_v_person, __pyx_t_4); __pyx_t_4 = 0; /* "enm_cython.pyx":131 * * for person in infected: #for every infected person * recovery_threshold = np.random.random_sample() # <<<<<<<<<<<<<< * if recovery_probability > recovery_threshold: #anything below recovery probability considered a recovery * graph_attributes[person,1] = 0 */ __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_random_sample); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } if (__pyx_t_2) { __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 131, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __pyx_t_4 = __Pyx_PyObject_CallNoArg(__pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 131, __pyx_L1_error) } 
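/* The call dispatched just above is np.random.random_sample() for the
   recovery draw: each infected person gets one Bernoulli(recovery_probability)
   trial per step.  A minimal Python sketch of the pass (same column layout
   as the infection step):

       for person in infected:
           if recovery_probability > np.random.random_sample():
               graph_attributes[person, 1] = 0   # leave I
               graph_attributes[person, 2] = 1   # enter R

   With per-step recovery probability r, the infectious period is therefore
   geometrically distributed with mean 1/r steps. */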
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_36 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_36 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 131, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_recovery_threshold = __pyx_t_36; /* "enm_cython.pyx":132 * for person in infected: #for every infected person * recovery_threshold = np.random.random_sample() * if recovery_probability > recovery_threshold: #anything below recovery probability considered a recovery # <<<<<<<<<<<<<< * graph_attributes[person,1] = 0 * graph_attributes[person,2] = 1 */ __pyx_t_14 = ((__pyx_v_recovery_probability > __pyx_v_recovery_threshold) != 0); if (__pyx_t_14) { /* "enm_cython.pyx":133 * recovery_threshold = np.random.random_sample() * if recovery_probability > recovery_threshold: #anything below recovery probability considered a recovery * graph_attributes[person,1] = 0 # <<<<<<<<<<<<<< * graph_attributes[person,2] = 1 * */ __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_v_person); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 133, __pyx_L1_error) __pyx_t_41 = __pyx_t_12; __pyx_t_42 = 1; __pyx_t_11 = -1; if (__pyx_t_41 < 0) { __pyx_t_41 += __pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_41 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_41 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_11 = 0; if (__pyx_t_42 < 0) { __pyx_t_42 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_42 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_42 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 133, __pyx_L1_error) } *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_41 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_42)) )) = 0.0; /* "enm_cython.pyx":134 * if recovery_probability > recovery_threshold: #anything below recovery probability considered a recovery * graph_attributes[person,1] = 0 * graph_attributes[person,2] = 1 # <<<<<<<<<<<<<< * * num_susceptible[n+1,0], num_infected[n+1,0], num_recovered[n+1,0] = np.sum(graph_attributes,axis=0) */ __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_v_person); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 134, __pyx_L1_error) __pyx_t_43 = __pyx_t_12; __pyx_t_44 = 2; __pyx_t_11 = -1; if (__pyx_t_43 < 0) { __pyx_t_43 += __pyx_v_graph_attributes.shape[0]; if (unlikely(__pyx_t_43 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_43 >= __pyx_v_graph_attributes.shape[0])) __pyx_t_11 = 0; if (__pyx_t_44 < 0) { __pyx_t_44 += __pyx_v_graph_attributes.shape[1]; if (unlikely(__pyx_t_44 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_44 >= __pyx_v_graph_attributes.shape[1])) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 134, __pyx_L1_error) } *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_graph_attributes.data + __pyx_t_43 * __pyx_v_graph_attributes.strides[0]) )) + __pyx_t_44)) )) = 1.0; /* "enm_cython.pyx":132 * for person in infected: #for every infected person * recovery_threshold = np.random.random_sample() * if recovery_probability > recovery_threshold: #anything below recovery probability considered a recovery # <<<<<<<<<<<<<< * graph_attributes[person,1] = 0 * graph_attributes[person,2] = 1 */ } /* "enm_cython.pyx":130 * * * for person in infected: #for every infected person # <<<<<<<<<<<<<< * recovery_threshold = 
np.random.random_sample() * if recovery_probability > recovery_threshold: #anything below recovery probability considered a recovery */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "enm_cython.pyx":136 * graph_attributes[person,2] = 1 * * num_susceptible[n+1,0], num_infected[n+1,0], num_recovered[n+1,0] = np.sum(graph_attributes,axis=0) # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_sum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_graph_attributes, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_axis, __pyx_int_0) < 0) __PYX_ERR(0, 136, __pyx_L1_error) __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { PyObject* sequence = __pyx_t_2; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 3)) { if (size > 3) __Pyx_RaiseTooManyValuesError(3); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 136, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 2); } else { __pyx_t_1 = PyList_GET_ITEM(sequence, 0); __pyx_t_5 = PyList_GET_ITEM(sequence, 1); __pyx_t_4 = PyList_GET_ITEM(sequence, 2); } __Pyx_INCREF(__pyx_t_1); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { Py_ssize_t index = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_45 = Py_TYPE(__pyx_t_3)->tp_iternext; index = 0; __pyx_t_1 = __pyx_t_45(__pyx_t_3); if (unlikely(!__pyx_t_1)) goto __pyx_L25_unpacking_failed; __Pyx_GOTREF(__pyx_t_1); index = 1; __pyx_t_5 = __pyx_t_45(__pyx_t_3); if (unlikely(!__pyx_t_5)) goto __pyx_L25_unpacking_failed; __Pyx_GOTREF(__pyx_t_5); index = 2; __pyx_t_4 = __pyx_t_45(__pyx_t_3); if (unlikely(!__pyx_t_4)) goto __pyx_L25_unpacking_failed; __Pyx_GOTREF(__pyx_t_4); 
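/* The three-way unpack in progress here receives the result of
   np.sum(graph_attributes, axis=0).  Because every row of graph_attributes
   is one-hot across the (S, I, R) columns, the column sums are exactly the
   compartment counts for this step, e.g.

       graph_attributes = [[1, 0, 0],    # susceptible
                           [0, 1, 0],    # infected
                           [0, 0, 1]]    # recovered
       np.sum(graph_attributes, axis=0)  # -> array([1., 1., 1.])

   The three counts are then stored into row n+1 of the per-step history
   arrays num_susceptible, num_infected and num_recovered. */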
if (__Pyx_IternextUnpackEndCheck(__pyx_t_45(__pyx_t_3), 3) < 0) __PYX_ERR(0, 136, __pyx_L1_error) __pyx_t_45 = NULL; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L26_unpacking_done; __pyx_L25_unpacking_failed:; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_45 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 136, __pyx_L1_error) __pyx_L26_unpacking_done:; } __pyx_t_36 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_36 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_46 = __pyx_PyFloat_AsDouble(__pyx_t_5); if (unlikely((__pyx_t_46 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_47 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_47 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 136, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_48 = (__pyx_v_n + 1); __pyx_t_49 = 0; __pyx_t_11 = -1; if (__pyx_t_48 < 0) { __pyx_t_48 += __pyx_v_num_susceptible.shape[0]; if (unlikely(__pyx_t_48 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_48 >= __pyx_v_num_susceptible.shape[0])) __pyx_t_11 = 0; if (__pyx_t_49 < 0) { __pyx_t_49 += __pyx_v_num_susceptible.shape[1]; if (unlikely(__pyx_t_49 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_49 >= __pyx_v_num_susceptible.shape[1])) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 136, __pyx_L1_error) } *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_num_susceptible.data + __pyx_t_48 * __pyx_v_num_susceptible.strides[0]) )) + __pyx_t_49)) )) = __pyx_t_36; __pyx_t_50 = (__pyx_v_n + 1); __pyx_t_51 = 0; __pyx_t_11 = -1; if (__pyx_t_50 < 0) { __pyx_t_50 += __pyx_v_num_infected.shape[0]; if (unlikely(__pyx_t_50 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_50 >= __pyx_v_num_infected.shape[0])) __pyx_t_11 = 0; if (__pyx_t_51 < 0) { __pyx_t_51 += __pyx_v_num_infected.shape[1]; if (unlikely(__pyx_t_51 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_51 >= __pyx_v_num_infected.shape[1])) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 136, __pyx_L1_error) } *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_num_infected.data + __pyx_t_50 * __pyx_v_num_infected.strides[0]) )) + __pyx_t_51)) )) = __pyx_t_46; __pyx_t_52 = (__pyx_v_n + 1); __pyx_t_53 = 0; __pyx_t_11 = -1; if (__pyx_t_52 < 0) { __pyx_t_52 += __pyx_v_num_recovered.shape[0]; if (unlikely(__pyx_t_52 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_52 >= __pyx_v_num_recovered.shape[0])) __pyx_t_11 = 0; if (__pyx_t_53 < 0) { __pyx_t_53 += __pyx_v_num_recovered.shape[1]; if (unlikely(__pyx_t_53 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_53 >= __pyx_v_num_recovered.shape[1])) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 136, __pyx_L1_error) } *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_num_recovered.data + __pyx_t_52 * __pyx_v_num_recovered.strides[0]) )) + __pyx_t_53)) )) = __pyx_t_47; } /* "enm_cython.pyx":139 * * * return num_susceptible[:,0], num_infected[:,0], num_recovered[:,0] # <<<<<<<<<<<<<< * * */ __pyx_t_31.data = __pyx_v_num_susceptible.data; __pyx_t_31.memview = __pyx_v_num_susceptible.memview; __PYX_INC_MEMVIEW(&__pyx_t_31, 0); __pyx_t_31.shape[0] = __pyx_v_num_susceptible.shape[0]; __pyx_t_31.strides[0] = 
__pyx_v_num_susceptible.strides[0]; __pyx_t_31.suboffsets[0] = -1; { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_num_susceptible.shape[1]; Py_ssize_t __pyx_tmp_stride = __pyx_v_num_susceptible.strides[1]; if (1 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (1 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 1)"); __PYX_ERR(0, 139, __pyx_L1_error) } __pyx_t_31.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_t_31, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __PYX_XDEC_MEMVIEW(&__pyx_t_31, 1); __pyx_t_31.memview = NULL; __pyx_t_31.data = NULL; __pyx_t_31.data = __pyx_v_num_infected.data; __pyx_t_31.memview = __pyx_v_num_infected.memview; __PYX_INC_MEMVIEW(&__pyx_t_31, 0); __pyx_t_31.shape[0] = __pyx_v_num_infected.shape[0]; __pyx_t_31.strides[0] = __pyx_v_num_infected.strides[0]; __pyx_t_31.suboffsets[0] = -1; { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_num_infected.shape[1]; Py_ssize_t __pyx_tmp_stride = __pyx_v_num_infected.strides[1]; if (1 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (1 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 1)"); __PYX_ERR(0, 139, __pyx_L1_error) } __pyx_t_31.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4 = __pyx_memoryview_fromslice(__pyx_t_31, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_31, 1); __pyx_t_31.memview = NULL; __pyx_t_31.data = NULL; __pyx_t_31.data = __pyx_v_num_recovered.data; __pyx_t_31.memview = __pyx_v_num_recovered.memview; __PYX_INC_MEMVIEW(&__pyx_t_31, 0); __pyx_t_31.shape[0] = __pyx_v_num_recovered.shape[0]; __pyx_t_31.strides[0] = __pyx_v_num_recovered.strides[0]; __pyx_t_31.suboffsets[0] = -1; { Py_ssize_t __pyx_tmp_idx = 0; Py_ssize_t __pyx_tmp_shape = __pyx_v_num_recovered.shape[1]; Py_ssize_t __pyx_tmp_stride = __pyx_v_num_recovered.strides[1]; if (1 && (__pyx_tmp_idx < 0)) __pyx_tmp_idx += __pyx_tmp_shape; if (1 && (__pyx_tmp_idx < 0 || __pyx_tmp_idx >= __pyx_tmp_shape)) { PyErr_SetString(PyExc_IndexError, "Index out of bounds (axis 1)"); __PYX_ERR(0, 139, __pyx_L1_error) } __pyx_t_31.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_t_31, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_31, 1); __pyx_t_31.memview = NULL; __pyx_t_31.data = NULL; __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_5); __pyx_t_2 = 0; __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_9 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 139, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_9; goto __pyx_L0; /* 
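   Note the type mismatch visible in the code just compiled: the function is
   declared cdef int (quoted signature below), yet .pyx line 139 returns a
   3-tuple of memoryview slices, so the tuple is forced through
   __Pyx_PyInt_As_int and the resulting TypeError is reported via
   __Pyx_WriteUnraisable in the exit path instead of propagating to the
   caller.  A hedged sketch of a corrected declaration, returning the tuple
   as a Python object:

       cdef tuple c_sri_mc(double[:, ::1] adj_mat,
                           double[:, ::1] graph_attributes,
                           long[:, ::1] age,
                           double transmission_probability,
                           double recovery_probability,
                           double occupation_probability,
                           int num_its):
           ...
           return num_susceptible[:, 0], num_infected[:, 0], num_recovered[:, 0]
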
"enm_cython.pyx":89 * # return susceptible, infected, recovered * * cdef int c_sri_mc(double[:, ::1] adj_mat, double[:,::1] graph_attributes, long[:, ::1] age, double transmission_probability, double recovery_probability, double occupation_probability, int num_its): # <<<<<<<<<<<<<< * * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_8, 1); __PYX_XDEC_MEMVIEW(&__pyx_t_31, 1); __Pyx_WriteUnraisable("enm_cython.c_sri_mc", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0); __pyx_r = 0; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_num_susceptible, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_num_infected, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_num_recovered, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_infections, 1); __Pyx_XDECREF(__pyx_v_susceptible); __Pyx_XDECREF(__pyx_v_infected); __Pyx_XDECREF(__pyx_v_person); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error) } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error) } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 120, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 
3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error) } else { /* "View.MemoryView":121 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":127 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 127, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) __PYX_ERR(1, 127, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":128 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if 
(__pyx_t_2) { /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 131, __pyx_L1_error) /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 134, __pyx_L1_error) /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":138 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error) __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":139 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ __pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) 
__PYX_ERR(1, 139, __pyx_L1_error) __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":142 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":143 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 146, __pyx_L1_error) /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":151 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(1, 151, __pyx_L1_error) /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":152 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":156 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":157 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":159 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":160 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":162 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 162, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":164 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":167 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":168 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # 
<<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 174, __pyx_L1_error) /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":177 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":178 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 178, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 178, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":179 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":180 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":169 * self.free_data = 
allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":184 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":186 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 
187, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 190, __pyx_L1_error) /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":191 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":192 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":193 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":194 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":195 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":196 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":197 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":198 * 
info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":201 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":203 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":205 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto 
__pyx_L3; } /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":214 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":216 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":217 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":225 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":226 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":229 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; 
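/* A note on the flag choice in get_memview() above: it requests
   PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE, which is exactly the
   kind of request the array.__getbuffer__ implementation earlier in this
   file can satisfy for both "c" and "fortran" mode arrays (each mode ORs
   PyBUF_ANY_CONTIGUOUS into bufmode). A minimal sketch of the same
   handshake against the public CPython buffer API -- the demo_* name is
   illustrative, not part of the generated module:

     static int demo_request_any_contiguous(PyObject *exporter)
     {
       Py_buffer view;
       const int flags = PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE;
       if (PyObject_GetBuffer(exporter, &view, flags) < 0)
         return -1;  // exporter refused the request (exception is set)
       // view.buf, view.len and view.format are valid until released
       PyBuffer_Release(&view);
       return 0;
     }
*/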
__Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":230 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 230, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":229 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":232 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":233 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":232 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":235 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":236 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 236, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":235 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":240 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":244 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":245 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 245, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
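/* The array.__getattr__, __getitem__ and __setitem__ methods generated
   above all follow one pattern: fetch the owning memoryview through the
   `memview` property, then delegate the operation to it. Stripped of the
   generated temporaries, the pattern reduces to the following sketch
   (plain CPython API; the demo_* name is not part of this module):

     static PyObject *demo_delegate_getitem(PyObject *self, PyObject *item)
     {
       PyObject *mv, *result;
       mv = PyObject_GetAttrString(self, "memview");  // property lookup
       if (mv == NULL)
         return NULL;
       result = PyObject_GetItem(mv, item);  // i.e. mv[item]
       Py_DECREF(mv);
       return result;  // NULL propagates the delegated error
     }
*/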
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":244 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":247 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":248 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 248, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 248, __pyx_L1_error) /* "View.MemoryView":247 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":251 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":240 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":277 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * 
def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 277, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 277, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":278 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":277 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":279 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":280 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic 
= Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":279 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":294 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":296 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":300 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":302 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":303 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":302 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":305 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":294 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":341 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: 
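/* align_pointer() above rounds an address up to the next multiple of
   `alignment`: when address % alignment is nonzero it adds the shortfall
   (alignment - offset). memoryview.__cinit__ below relies on it to align
   the atomic acquisition counter. The same arithmetic as a standalone
   sketch (illustrative, assumes <stdint.h> for uintptr_t):

     static void *demo_align_up(void *memory, size_t alignment)
     {
       uintptr_t p = (uintptr_t) memory;
       size_t offset = (size_t)(p % alignment);
       if (offset > 0)
         p += alignment - offset;  // step up to the aligned boundary
       return (void *) p;
     }

   For example demo_align_up((void *) 13, 8) yields (void *) 16, while an
   already aligned address is returned unchanged. */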
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 341, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 341, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 341, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 341, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":342 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":343 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":344 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":345 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> 
self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 345, __pyx_L1_error) /* "View.MemoryView":346 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":347 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":348 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":346 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":344 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":351 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":352 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":353 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":351 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":354 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":355 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":356 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if 
self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":357 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 357, __pyx_L1_error) /* "View.MemoryView":356 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":354 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":359 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":360 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":359 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":362 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":364 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":366 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":341 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":368 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { 
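/* memoryview.__cinit__ above takes each instance's PyThread_type_lock from
   a small static pool (the literal 8 in the generated comparison is
   Cython's THREAD_LOCKS_PREALLOCATED) and only calls
   PyThread_allocate_lock() once the pool is exhausted, raising MemoryError
   if that allocation fails. Reduced to its core, the acquisition policy is
   (demo_* names are illustrative, not the generated globals):

     static PyThread_type_lock demo_locks[8];  // assume filled at startup
     static int demo_locks_used = 0;

     static PyThread_type_lock demo_take_lock(void)
     {
       PyThread_type_lock lock = NULL;
       if (demo_locks_used < 8)
         lock = demo_locks[demo_locks_used++];  // reuse a pooled lock
       if (lock == NULL)
         lock = PyThread_allocate_lock();  // may return NULL on OOM
       return lock;
     }

   The matching release logic appears in memoryview.__dealloc__ just
   below. */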
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyThread_type_lock __pyx_t_5; PyThread_type_lock __pyx_t_6; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":369 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":370 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":369 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":374 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":375 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":376 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":378 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":380 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); 
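/* The exchange in flight here is a swap-with-last removal: the lock being
   returned is swapped into the unused tail of
   __pyx_memoryview_thread_locks, so the in-use prefix of the array stays
   dense and the lock can be reissued in O(1). On a plain array the same
   removal reads (illustrative sketch):

     // remove locks[i] from the in-use prefix locks[0 .. used-1]
     used -= 1;
     if (i != used) {
       PyThread_type_lock tmp = locks[i];
       locks[i] = locks[used];
       locks[used] = tmp;  // returned lock parked at the unused tail
     }
*/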
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":379 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; /* "View.MemoryView":378 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":381 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":376 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":383 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":374 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":368 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":385 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":387 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":389 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else 
{ __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 389, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 389, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 389, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":390 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 390, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 390, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":389 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":392 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":385 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":395 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":396 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":397 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":396 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":399 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 399, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 399, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":402 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 402, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":403 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":402 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* 
"View.MemoryView":405 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 405, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":406 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":395 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":408 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":409 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 409, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 
= PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 409, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":411 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 411, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":412 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":413 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":414 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":413 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L4; } /* "View.MemoryView":416 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 416, __pyx_L1_error) __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "View.MemoryView":411 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L3; } /* "View.MemoryView":418 * self.setitem_slice_assign_scalar(self[index], value) * else: * 
self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":408 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":420 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":421 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":423 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":424 * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 424, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":423 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; 
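/* The call being assembled here re-wraps an arbitrary assignment source as
   memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, self.dtype_is_object);
   per the `except TypeError: return None` handler that follows, objects
   that cannot export a buffer make is_slice() return None, which sends
   __setitem__ above down the scalar-assignment path. Using CPython's own
   memoryview as a stand-in exporter check, the shape of that coercion is
   (illustrative sketch only -- the real code constructs Cython's internal
   memoryview type with explicit flags):

     static PyObject *demo_coerce_to_view(PyObject *obj)
     {
       PyObject *mv = PyMemoryView_FromObject(obj);
       if (mv == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
         PyErr_Clear();
         Py_RETURN_NONE;  // not a buffer exporter: treat as scalar
       }
       return mv;  // a view, or NULL with a non-TypeError set
     }
*/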
__pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 423, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":425 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 425, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":426 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":422 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L11_try_end:; } /* "View.MemoryView":421 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":428 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":420 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":430 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { 
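/* Sketch of intent, taken from the Cython source comments that follow:
   both operands are first lowered to raw __Pyx_memviewslice structs via
   get_slice_from_memview(), and memoryview_copy_contents() then performs
   the element-wise copy, reconciling src.ndim with dst.ndim and handling
   per-element reference counting when dtype_is_object is set. */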
__Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":434 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 434, __pyx_L1_error) /* "View.MemoryView":435 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 435, __pyx_L1_error) /* "View.MemoryView":436 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 436, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":434 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 434, __pyx_L1_error) /* "View.MemoryView":430 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":438 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice 
*__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":440 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":445 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":447 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":448 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":449 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":450 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 450, __pyx_L1_error) /* "View.MemoryView":449 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":451 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":447 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":453 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":455 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":456 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":457 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":456 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":459 * (<PyObject **> item)[0] = <PyObject *> value * else: * 
self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 459, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":463 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":464 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 464, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":463 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":465 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":468 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } /*exception exit:*/{ __Pyx_PyThreadState_declare __pyx_L6_error:; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } __Pyx_PyThreadState_assign if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":438 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; 
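/* Note (hedged summary of the scalar-assignment path above): the value is
   packed once into a temporary item buffer -- the 128-int stack array when
   view.itemsize fits in it, otherwise a PyMem_Malloc()'d block -- and
   slice_assign_scalar() then replicates that single item across every
   element of the destination slice. The Cython finally-block semantics are
   emulated by the exception-exit code below, so PyMem_Free(tmp) runs on
   both the normal and the error path. */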
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":470 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":471 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) __PYX_ERR(1, 471, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":472 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 472, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":470 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":474 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":477 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 477, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":480 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] 
# <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 480, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":482 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":486 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* 
"View.MemoryView":487 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 487, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":486 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":488 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_PyThreadState_assign __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":483 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 483, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 483, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":484 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 484, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":481 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_PyThreadState_assign __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":474 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; 
__pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":490 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":493 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":498 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":499 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 499, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":498 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":501 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = 
struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 503, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":504 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":504 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ 
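/* Note (hedged sketch): the compiled enumerate() loop around this store
   copies the struct.pack()'d bytes into the item slot one byte at a time;
   it is behaviorally equivalent to
       memcpy(itemp, PyBytes_AS_STRING(bytesvalue), PyBytes_GET_SIZE(bytesvalue));
   assuming bytesvalue is exactly view.itemsize bytes long, which holds
   whenever the format string matches the buffer's item layout. */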
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":490 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":507 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":508 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":509 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":508 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L3; } /* "View.MemoryView":511 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":513 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":514 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":513 * info.shape = NULL * * if flags & 
PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":516 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":518 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":519 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":518 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":521 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":523 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":524 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":523 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":526 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":528 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":529 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":530 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":531 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":532 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":533 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":507 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if 
flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":539 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":540 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 540, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":541 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 541, __pyx_L1_error) /* "View.MemoryView":542 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":539 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":545 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":546 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":545 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":549 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":550 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":549 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
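/* Note: unlike shape, stride information can be absent -- when the
   underlying Py_buffer was filled without PyBUF_STRIDES, view.strides is
   NULL, and the getter below raises
   ValueError("Buffer view does not expose strides") rather than
   fabricating a tuple. */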
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":556 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 556, __pyx_L1_error) /* "View.MemoryView":554 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":558 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":553 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":561 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":562 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":563 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":562 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":565 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":561 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":568 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":569 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":568 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":572 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":573 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":572 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":576 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject 
*__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":577 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":580 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":581 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":582 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":584 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":585 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(1, 585, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":587 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":581 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":589 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":580 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":591 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":592 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":593 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":592 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":595 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":591 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":597 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); 
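/* The functions above follow Cython's recurring wrapper/impl split: each
 * "def" gets a Python-visible wrapper ("__pyx_pw_*" or
 * "__pyx_memoryview___*") that casts "self" to the extension-struct type
 * and delegates to the typed implementation ("__pyx_pf_*"); the
 * __Pyx_RefNanny* calls are reference-count tracing hooks that compile to
 * no-ops unless the module is built with CYTHON_REFNANNY. For reference,
 * a minimal pure-Python sketch (illustrative only, not part of the
 * generated module) of the "size" and "__len__" semantics implemented
 * above -- "size" is the product of the shape, computed once and cached
 * in "_size":
 *
 *     def size(shape, ndim):
 *         result = 1
 *         for length in shape[:ndim]:
 *             result *= length
 *         return result            # cached in self._size on first access
 *
 *     def length(shape, ndim):     # __len__
 *         return shape[0] if ndim >= 1 else 0
 */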
return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":598 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":599 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":598 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":597 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":602 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":601 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":608 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":609 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', 
__pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 609, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":605 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":614 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":615 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 615, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":617 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj 
*)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":619 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":621 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":622 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":627 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":617 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":629 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":631 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & 
~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":633 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":634 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 634, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":639 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 639, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":629 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":644 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 644, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = 
((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":645 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":646 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":643 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":649 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":650 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":649 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":652 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":657 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":658 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":657 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":660 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":662 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":663 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":664 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":665 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 665, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 665, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 665, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); 
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 665, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":666 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":667 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":668 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 668, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__13); __Pyx_GIVEREF(__pyx_slice__13); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__13); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":669 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":667 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":671 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__14); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 671, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":672 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":666 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":674 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":675 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = 
have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(1, 675, __pyx_L1_error) /* "View.MemoryView":674 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":677 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":678 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 678, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":665 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":680 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) __PYX_ERR(1, 680, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":681 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":681 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":684 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":652 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":686 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":687 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":688 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * 
if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":689 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":686 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":696 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":697 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":704 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":708 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 708, __pyx_L1_error) } } #endif /* "View.MemoryView":710 * assert memview.view.ndim > 0 * * if isinstance(memview, 
_memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":711 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 711, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":712 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":710 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":714 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":715 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":721 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":722 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":727 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":728 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":732 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 732, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) 
break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 732, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 732, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":733 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":737 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 737, __pyx_L1_error) /* "View.MemoryView":734 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 734, __pyx_L1_error) /* "View.MemoryView":733 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":740 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":741 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":742 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":743 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":744 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":740 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto 
__pyx_L6; } /* "View.MemoryView":746 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 746, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":747 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 747, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 747, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":748 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":750 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":751 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 751, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":752 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":754 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) __PYX_ERR(1, 754, __pyx_L1_error) /* "View.MemoryView":760 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":732 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":762 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":763 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":764 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 764, __pyx_L1_error) } /* "View.MemoryView":765 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 765, __pyx_L1_error) } /* "View.MemoryView":763 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 763, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 763, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":762 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":768 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { 
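/* Fallthrough for a plain memoryview (not a _memoryviewslice): the
 * to_object_func/to_dtype_func callbacks are passed as NULL here because
 * only _memoryviewslice carries dtype-conversion functions. In the loop
 * above, an integer index drops a dimension, None inserts a new length-1
 * dimension (shape 1, stride 0, suboffset -1), and a slice is normalized
 * by slice_memviewslice. Note how the source keeps a slice bound's value
 * and its presence separate, so that 0 and None remain distinguishable
 * (illustrative restatement of the quoted source, not additional
 * generated code):
 *
 *     start = index.start or 0               # value, defaulting to 0
 *     have_start = index.start is not None   # presence flag
 */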
__Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":769 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 768, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":768 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 768, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":696 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":793 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":813 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":815 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":816 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":815 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":817 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":818 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 818, __pyx_L1_error) /* "View.MemoryView":817 * if start < 0: * start += shape * if 
not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":813 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":821 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":823 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":824 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 824, __pyx_L1_error) /* "View.MemoryView":823 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":827 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":829 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":830 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":831 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":830 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":828 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":832 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":833 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":834 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":833 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":836 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* 
"View.MemoryView":832 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":827 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":838 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":838 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":841 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":843 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":844 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":846 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":846 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":844 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":848 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":848 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":843 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":851 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":852 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":851 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":854 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":856 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":857 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":856 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":861 * * with 
cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":863 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":864 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":863 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":866 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":867 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":866 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":870 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":871 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":872 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":875 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":876 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":875 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":878 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":880 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":882 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":883 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":882 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":885 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":886 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 885, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":881 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":888 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":880 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":890 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":793 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":896 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":898 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":899 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":902 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ 
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":903 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 903, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 903, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":904 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":902 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":906 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":907 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":908 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":909 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":908 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":911 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":912 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":913 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":914 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = 
__Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 914, __pyx_L1_error) /* "View.MemoryView":913 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":911 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":916 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 917, __pyx_L1_error) /* "View.MemoryView":916 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":919 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":920 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":921 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":920 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":923 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":896 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
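/*
  Error exit for pybuffer_index: temporaries are released, a traceback
  frame is recorded, and NULL is returned -- the function is declared
  `except NULL`, so callers test the returned pointer rather than a
  separate status code.
*/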
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":929 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; /* "View.MemoryView":930 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":932 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":933 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":937 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":938 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":939 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":940 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":942 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":943 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char 
*)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(1, 943, __pyx_L1_error) /* "View.MemoryView":942 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":945 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":929 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":962 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":963 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":962 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":965 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":966 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":967 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 967, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; 
/* "View.MemoryView":966 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":969 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":965 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":971 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":972 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":973 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) __PYX_ERR(1, 973, __pyx_L1_error) /* "View.MemoryView":972 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":975 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":971 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; 
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":978 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":979 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":978 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":993 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":994 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":993 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":999 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); 
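/*
  Final slot of the (None, 0, dtype_is_object) argument tuple.  The
  _memoryviewslice is constructed around a dummy None base and a zero
  second argument (apparently the buffer-flags parameter -- an
  assumption, since its constructor signature is not shown here); the
  real state (from_slice, from_object, view, shape, strides,
  suboffsets, len) is patched in field by field below.
*/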
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1001 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1002 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1004 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1004, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1005 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1007 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1008 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1009 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1010 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1011 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":1013 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1015 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1016 * * 
result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1019 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1020 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1021 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1022 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1023 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":1021 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":1025 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1026 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1027 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1027, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = 
__pyx_t_9; } /* "View.MemoryView":1029 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1030 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1032 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":985 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1035 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1038 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1039 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1039, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1040 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1038 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1042 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1043 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1035 * * 
@cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1046 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1050 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1051 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1052 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1054 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1055 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1057 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1058 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1059 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1060 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1L; } 
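/*
  A NULL view.suboffsets array means every dimension is direct; -1 is
  stored as the conventional "no indirection" marker so later code can
  test suboffset >= 0 uniformly.
*/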
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1046 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1066 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1067 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1067, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1070 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1077 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1078 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1079 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1077 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1081 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1082 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1084 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1086 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1070 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1092 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1093 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1094 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1093 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1096 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1092 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1099 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1104 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1105 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1107 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1108 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1109 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1110 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1108 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1112 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1113 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * 
f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1114 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1115 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1113 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1117 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1118 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1117 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1120 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1099 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1123 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1130 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1131 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1132 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1133 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1135 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim 
== 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1136 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1137 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1136 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1138 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); /* "View.MemoryView":1136 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1140 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1141 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1142 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1143 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1135 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1145 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1146 * else: * for i in 
range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1150 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1151 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1123 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1153 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1156 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1153 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1160 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1163 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1165 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1166 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1168 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1160 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice 
*src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1171 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1180 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1181 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1182 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1183 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1180 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1185 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1186 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1187 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1189 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1171 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1192 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; /* "View.MemoryView":1203 * cdef void *result * * cdef size_t itemsize = 
src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1204 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1206 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1207 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1208 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) __PYX_ERR(1, 1208, __pyx_L1_error) /* "View.MemoryView":1207 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1211 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1212 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1213 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1214 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1215 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1217 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1221 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1222 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1223 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1222 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* 
"View.MemoryView":1225 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1226 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); /* "View.MemoryView":1225 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1228 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1230 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1192 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1235 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1238 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1237 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, 
extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 1237, __pyx_L1_error) /* "View.MemoryView":1235 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1241 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1242 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1242, __pyx_L1_error) /* "View.MemoryView":1241 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1245 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1246 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1247 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1247, __pyx_L1_error) /* "View.MemoryView":1246 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1249 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1249, __pyx_L1_error) } /* "View.MemoryView":1245 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1252 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; /* "View.MemoryView":1260 * Check for overlapping memory and verify the shapes. 
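 *
 * [editorial note] Contract of the generated body below: leading dimensions
 * are broadcast until both slices have ndim = max(src_ndim, dst_ndim),
 * extents are verified per dimension, an overlapping source is first copied
 * to a temporary buffer, and the function returns 0 on success or -1 with a
 * Python exception set on failure.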
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1261 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1263 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1264 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1265 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1268 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1269 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1268 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1270 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1270 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1273 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1275 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1276 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { 
/* "View.MemoryView":1277 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1278 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1279 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1277 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1281 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1281, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1276 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1283 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1284 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) __PYX_ERR(1, 1284, __pyx_L1_error) /* "View.MemoryView":1283 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1286 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1288 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1289 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1288 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1291 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) __PYX_ERR(1, 1291, __pyx_L1_error) 
__pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1292 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1286 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1294 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1297 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1298 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1297 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1299 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1299 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1305 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1306 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1307 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # 
<<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1308 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1294 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1310 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1313 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1313, __pyx_L1_error) /* "View.MemoryView":1314 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) __PYX_ERR(1, 1314, __pyx_L1_error) /* "View.MemoryView":1310 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1316 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1317 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1318 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1320 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1321 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1252 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1324 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int 
__pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1328 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1330 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1331 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1332 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1333 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1335 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1336 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1337 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1338 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1324 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1346 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1350 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1351 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, 
__pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1350 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1346 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1355 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1358 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1355 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1361 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1365 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1366 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1367 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1368 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1367 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1370 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { 
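          /* [editorial note] inc is false here: release the reference owned
             by this element slot before it is overwritten. Callers must hold
             the GIL, hence the refcount_objects_in_slice_with_gil wrapper
             above for nogil contexts. */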
Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1366 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1372 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1373 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1375 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1361 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1381 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1384 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1385 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1387 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1381 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1395 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] 
* */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1396 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1398 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1399 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1400 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1401 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1398 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1403 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1404 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1406 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); 
(*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { 0, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { 0, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "enm_cython.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } 
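/* Abstract subtypes take the fallback branch: object.__new__ runs instead,
   which raises the standard TypeError for abstract classes. */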
else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "enm_cython.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); 
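/* Resurrection trick: any in-flight exception is stashed via PyErr_Fetch and
   the refcount is bumped so Python code run by __dealloc__ cannot trigger a
   nested, re-entrant deallocation of this same object. */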
__pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", 
__pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "enm_cython.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct 
__pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "enm_cython._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "enm_cython", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* 
m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_n_s_C, __pyx_k_C, sizeof(__pyx_k_C), 0, 0, 1, 1}, {&__pyx_kp_s_C_Users_club084_Documents_Classe, __pyx_k_C_Users_club084_Documents_Classe, sizeof(__pyx_k_C_Users_club084_Documents_Classe), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_kp_s_This_option_not_implemented_yet, __pyx_k_This_option_not_implemented_yet, sizeof(__pyx_k_This_option_not_implemented_yet), 0, 0, 1, 0}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_X, __pyx_k_X, sizeof(__pyx_k_X), 0, 0, 1, 1}, {&__pyx_n_s_Y, __pyx_k_Y, sizeof(__pyx_k_Y), 0, 0, 1, 1}, {&__pyx_n_s_adjacency_matrix, __pyx_k_adjacency_matrix, sizeof(__pyx_k_adjacency_matrix), 0, 0, 1, 1}, {&__pyx_n_s_age, __pyx_k_age, sizeof(__pyx_k_age), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, {&__pyx_n_s_array_f, __pyx_k_array_f, sizeof(__pyx_k_array_f), 0, 0, 1, 1}, {&__pyx_n_s_array_f_multi_wrapper, __pyx_k_array_f_multi_wrapper, sizeof(__pyx_k_array_f_multi_wrapper), 0, 0, 1, 1}, {&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_c_array_f, 
__pyx_k_c_array_f, sizeof(__pyx_k_c_array_f), 0, 0, 1, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_cython_wrapper_sri_mc, __pyx_k_cython_wrapper_sri_mc, sizeof(__pyx_k_cython_wrapper_sri_mc), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enm_cython, __pyx_k_enm_cython, sizeof(__pyx_k_enm_cython), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_exp, __pyx_k_exp, sizeof(__pyx_k_exp), 0, 0, 1, 1}, {&__pyx_n_s_fib, __pyx_k_fib, sizeof(__pyx_k_fib), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_graph_attributes, __pyx_k_graph_attributes, sizeof(__pyx_k_graph_attributes), 0, 0, 1, 1}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_index, __pyx_k_index, sizeof(__pyx_k_index), 0, 0, 1, 1}, {&__pyx_n_s_init_distrib, __pyx_k_init_distrib, sizeof(__pyx_k_init_distrib), 0, 0, 1, 1}, {&__pyx_n_s_int, __pyx_k_int, sizeof(__pyx_k_int), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_math, __pyx_k_math, sizeof(__pyx_k_math), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_nonzero, __pyx_k_nonzero, sizeof(__pyx_k_nonzero), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_num_infected, __pyx_k_num_infected, sizeof(__pyx_k_num_infected), 0, 0, 1, 1}, {&__pyx_n_s_num_its, __pyx_k_num_its, sizeof(__pyx_k_num_its), 0, 0, 1, 1}, {&__pyx_n_s_num_people, __pyx_k_num_people, sizeof(__pyx_k_num_people), 0, 0, 1, 1}, {&__pyx_n_s_num_recovered, __pyx_k_num_recovered, sizeof(__pyx_k_num_recovered), 0, 0, 1, 1}, {&__pyx_n_s_num_susceptible, __pyx_k_num_susceptible, sizeof(__pyx_k_num_susceptible), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_occupation_probability, __pyx_k_occupation_probability, 
sizeof(__pyx_k_occupation_probability), 0, 0, 1, 1}, {&__pyx_n_s_ones, __pyx_k_ones, sizeof(__pyx_k_ones), 0, 0, 1, 1}, {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1}, {&__pyx_n_s_random_sample, __pyx_k_random_sample, sizeof(__pyx_k_random_sample), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_recovery_probability, __pyx_k_recovery_probability, sizeof(__pyx_k_recovery_probability), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_sum, __pyx_k_sum, sizeof(__pyx_k_sum), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_transmission_probability, __pyx_k_transmission_probability, sizeof(__pyx_k_transmission_probability), 0, 0, 1, 1}, {&__pyx_n_s_transpose, __pyx_k_transpose, sizeof(__pyx_k_transpose), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_vstack, __pyx_k_vstack, sizeof(__pyx_k_vstack), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 11, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 83, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 396, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 425, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 599, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if 
(!__pyx_builtin_IndexError) __PYX_ERR(1, 818, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "enm_cython.pyx":77 * num_infected = [0 for i in range(num_its+1)] * num_recovered = [0 for i in range(num_its+1)] * graph_attributes[0,0] = 0 # <<<<<<<<<<<<<< * graph_attributes[0,1] = 1 * num_susceptible[0] = num_people - 1 */ __pyx_tuple_ = PyTuple_Pack(2, __pyx_int_0, __pyx_int_0); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 77, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "enm_cython.pyx":78 * num_recovered = [0 for i in range(num_its+1)] * graph_attributes[0,0] = 0 * graph_attributes[0,1] = 1 # <<<<<<<<<<<<<< * num_susceptible[0] = num_people - 1 * num_infected[0] = 1 */ __pyx_tuple__2 = PyTuple_Pack(2, __pyx_int_0, __pyx_int_1); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 78, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "enm_cython.pyx":83 * * else: * raise ValueError('This option not implemented yet sorry!') #first column is susceptible, second infected, third resistant # <<<<<<<<<<<<<< * * graph_attributes = np.array(graph_attributes,order='C') */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_This_option_not_implemented_yet); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ 
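/* As with the tuples above, the exception's argument tuple is packed once at
   import time and reused on every subsequent raise of this ValueError. */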
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":484 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":556 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 556, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":563 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 563, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":668 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__13 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__13)) __PYX_ERR(1, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__13); __Pyx_GIVEREF(__pyx_slice__13); /* "View.MemoryView":671 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__14 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__14)) __PYX_ERR(1, 671, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__14); __Pyx_GIVEREF(__pyx_slice__14); /* "View.MemoryView":682 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":689 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "enm_cython.pyx":15 * return a * * def fib(x): # <<<<<<<<<<<<<< * return cfib(x) * */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_n_s_x); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_C_Users_club084_Documents_Classe, __pyx_n_s_fib, 15, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(0, 
15, __pyx_L1_error) /* "enm_cython.pyx":19 * * * def array_f(X): # <<<<<<<<<<<<<< * * Y = np.zeros(X.shape) */ __pyx_tuple__19 = PyTuple_Pack(3, __pyx_n_s_X, __pyx_n_s_Y, __pyx_n_s_index); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_C_Users_club084_Documents_Classe, __pyx_n_s_array_f, 19, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 19, __pyx_L1_error) /* "enm_cython.pyx":27 * return Y * * def c_array_f(X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ __pyx_tuple__21 = PyTuple_Pack(4, __pyx_n_s_X, __pyx_n_s_N, __pyx_n_s_Y, __pyx_n_s_i); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(1, 0, 4, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_C_Users_club084_Documents_Classe, __pyx_n_s_c_array_f, 27, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) __PYX_ERR(0, 27, __pyx_L1_error) /* "enm_cython.pyx":63 * * * def array_f_multi_wrapper(Y): # <<<<<<<<<<<<<< * X = Y * */ __pyx_tuple__23 = PyTuple_Pack(2, __pyx_n_s_Y, __pyx_n_s_X); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_C_Users_club084_Documents_Classe, __pyx_n_s_array_f_multi_wrapper, 63, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 63, __pyx_L1_error) /* "enm_cython.pyx":68 * return c_array_f_multi(X) * * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): # <<<<<<<<<<<<<< * if init_distrib == 0: * num_people = len(adjacency_matrix) */ __pyx_tuple__25 = PyTuple_Pack(13, __pyx_n_s_adjacency_matrix, __pyx_n_s_age, __pyx_n_s_transmission_probability, __pyx_n_s_recovery_probability, __pyx_n_s_occupation_probability, __pyx_n_s_init_distrib, __pyx_n_s_num_its, __pyx_n_s_num_people, __pyx_n_s_graph_attributes, __pyx_n_s_num_susceptible, __pyx_n_s_num_infected, __pyx_n_s_num_recovered, __pyx_n_s_i); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(7, 0, 13, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_C_Users_club084_Documents_Classe, __pyx_n_s_cython_wrapper_sri_mc, 68, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(0, 68, __pyx_L1_error) /* "View.MemoryView":282 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); /* "View.MemoryView":283 * * cdef generic = Enum("<strided and direct or 
indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); /* "View.MemoryView":284 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__29); __Pyx_GIVEREF(__pyx_tuple__29); /* "View.MemoryView":287 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); /* "View.MemoryView":288 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__31); __Pyx_GIVEREF(__pyx_tuple__31); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_float_0_5 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_float_0_5)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initenm_cython(void); /*proto*/ PyMODINIT_FUNC initenm_cython(void) #else PyMODINIT_FUNC PyInit_enm_cython(void); /*proto*/ PyMODINIT_FUNC PyInit_enm_cython(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; static PyThread_type_lock __pyx_t_3[8]; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_enm_cython(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef 
__Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("enm_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_enm_cython) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "enm_cython")) { if (unlikely(PyDict_SetItemString(modules, "enm_cython", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 275, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; 
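/* The remaining memoryview vtable slots are wired to their C implementations
   before PyType_Ready(), so cdef-level method calls dispatch through function
   pointers rather than Python attribute lookup. */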
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 326, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 951, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "enm_cython.pyx":2 * from cython.parallel import prange * from math import exp # <<<<<<<<<<<<<< * # from libc.math cimport exp as c_exp * import numpy as np */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_exp); __Pyx_GIVEREF(__pyx_n_s_exp); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_exp); __pyx_t_2 = __Pyx_Import(__pyx_n_s_math, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_exp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_exp, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "enm_cython.pyx":4 * from math import exp * # from libc.math cimport exp as c_exp * import numpy as np # <<<<<<<<<<<<<< * cimport openmp * */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) __PYX_ERR(0, 4, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "enm_cython.pyx":15 * return a * 
* def fib(x): # <<<<<<<<<<<<<< * return cfib(x) * */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_10enm_cython_1fib, NULL, __pyx_n_s_enm_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_fib, __pyx_t_2) < 0) __PYX_ERR(0, 15, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "enm_cython.pyx":19 * * * def array_f(X): # <<<<<<<<<<<<<< * * Y = np.zeros(X.shape) */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_10enm_cython_3array_f, NULL, __pyx_n_s_enm_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_array_f, __pyx_t_2) < 0) __PYX_ERR(0, 19, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "enm_cython.pyx":27 * return Y * * def c_array_f(X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_10enm_cython_5c_array_f, NULL, __pyx_n_s_enm_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_c_array_f, __pyx_t_2) < 0) __PYX_ERR(0, 27, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "enm_cython.pyx":63 * * * def array_f_multi_wrapper(Y): # <<<<<<<<<<<<<< * X = Y * */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_10enm_cython_7array_f_multi_wrapper, NULL, __pyx_n_s_enm_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_array_f_multi_wrapper, __pyx_t_2) < 0) __PYX_ERR(0, 63, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "enm_cython.pyx":68 * return c_array_f_multi(X) * * def cython_wrapper_sri_mc(adjacency_matrix, age,transmission_probability,recovery_probability,occupation_probability,init_distrib = 0, num_its = 0): # <<<<<<<<<<<<<< * if init_distrib == 0: * num_people = len(adjacency_matrix) */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_10enm_cython_9cython_wrapper_sri_mc, NULL, __pyx_n_s_enm_cython); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_cython_wrapper_sri_mc, __pyx_t_2) < 0) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "enm_cython.pyx":1 * from cython.parallel import prange # <<<<<<<<<<<<<< * from math import exp * # from libc.math cimport exp as c_exp */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":207 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":282 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":283 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":284 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":287 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":288 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":312 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":313 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_3[0] = PyThread_allocate_lock(); __pyx_t_3[1] = PyThread_allocate_lock(); __pyx_t_3[2] = PyThread_allocate_lock(); __pyx_t_3[3] = PyThread_allocate_lock(); __pyx_t_3[4] = PyThread_allocate_lock(); __pyx_t_3[5] = PyThread_allocate_lock(); __pyx_t_3[6] = PyThread_allocate_lock(); __pyx_t_3[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_3, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":535 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 535, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) 
__PYX_ERR(1, 535, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":981 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "View.MemoryView":1391 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init enm_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init enm_cython"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); PyObject *result; int flags; assert(PyCFunction_Check(func)); assert(METH_FASTCALL == PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST)); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs, NULL); } #endif // CYTHON_FAST_PYCCALL /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, 
PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = PyThreadState_GET(); PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = f->f_localsplus; for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif // CPython < 3.6 #endif // CYTHON_FAST_PYCALL /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { 
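    /* Fast path for METH_O C functions: the single argument is handed to
       the C function directly instead of being packed into a 1-tuple the
       way PyObject_Call() would require; the recursion-depth check is
       kept so that deep C-level call chains still raise RecursionError
       rather than overflowing the C stack. */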
PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* GetItemInt */ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}

/* BufferIndexError */
static void __Pyx_RaiseBufferIndexError(int axis) {
    PyErr_Format(PyExc_IndexError,
                 "Out of bounds on buffer access (axis %d)", axis);
}

/* BufferFormatCheck */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    unsigned int n = 1;
    return *(unsigned char*)(&n) != 0;
}
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
/* Parse an unsigned decimal number; returns -1 if *ts does not start with a digit. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)  /* first char was not a digit */
        PyErr_Format(PyExc_ValueError,
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    switch (ch) {
        case 'c': return "'char'";
        case 'b': return "'signed char'";
        case 'B': return "'unsigned char'";
        case 'h': return "'short'";
        case 'H': return "'unsigned short'";
        case 'i': return "'int'";
        case 'I': return "'unsigned int'";
        case 'l': return "'long'";
        case 'L': return "'unsigned long'";
        case 'q': return "'long long'";
        case 'Q': return "'unsigned long long'";
        case 'f': return (is_complex ? "'complex float'" : "'float'");
        case 'd': return (is_complex ? "'complex double'" : "'double'");
        case 'g': return (is_complex ? "'complex long double'" : "'long double'");
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 's': case 'p': return "a string";
        case 0: return "end";
        default: return "unparseable format string";
    }
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return 2;
        case 'i': case 'I': case 'l': case 'L': return 4;
        case 'q': case 'Q': return 8;
        case 'f': return (is_complex ? 8 : 4);
        case 'd': return (is_complex ? 16 : 8);
        case 'g': {
            PyErr_SetString(PyExc_ValueError,
                            "Python does not define a standard format string size for long double ('g').");
            return 0;
        }
        case 'O': case 'P': return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
        case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(short);
        case 'i': case 'I': return sizeof(int);
        case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(float) * (is_complex ? 2 : 1);
        case 'd': return sizeof(double) * (is_complex ? 2 : 1);
        case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
        case 'O': case 'P': return sizeof(void*);
        default: {
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
        }
    }
}
/* These structs measure the alignment the compiler enforces for each
   scalar type: sizeof(__Pyx_st_T) - sizeof(T) is the padding inserted
   between the leading char and the T member, i.e. T's alignment. */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct.  This will probably be the same as
   above, but we don't have any guarantees.
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
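/* The format string ended while fields of a struct dtype are still
   unconsumed; report which field was expected next. */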
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match 
size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) 
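/* printf-style wrapper around Py_FatalError(): formats the message into a
   fixed 200-byte buffer and aborts the process.  Used by the memoryview
   refcounting helpers below when an acquisition count is found to be
   corrupt and no Python exception can safely be raised. */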
{ va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); Py_FatalError(msg); va_end(vargs); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* BufferIndexErrorNogil */ static void __Pyx_RaiseBufferIndexErrorNogil(int axis) { #ifdef WITH_THREAD PyGILState_STATE gilstate = PyGILState_Ensure(); #endif __Pyx_RaiseBufferIndexError(axis); #ifdef WITH_THREAD PyGILState_Release(gilstate); #endif } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" 
CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { if (op1 == op2) { Py_RETURN_TRUE; } #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long a = PyInt_AS_LONG(op1); if (a == b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a; const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; } #if PyLong_SHIFT < 30 && PyLong_SHIFT != 15 default: return PyLong_Type.tp_richcompare(op1, op2, Py_EQ); #else default: Py_RETURN_FALSE; #endif } } if (a == b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); if ((double)a == (double)b) { Py_RETURN_TRUE; } else { Py_RETURN_FALSE; } } return PyObject_RichCompare(op1, op2, Py_EQ); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_SubtractObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a - b); if (likely((x^a) >= 0 || (x^~b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) 
(((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); } } x = a - b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla - llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("subtract", return NULL) result = ((double)a) - (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); } #endif /* SetItemInt */ static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { int r; if (!j) return -1; r = PyObject_SetItem(o, j, v); Py_DECREF(j); return r; } static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { PyObject* old = PyList_GET_ITEM(o, n); Py_INCREF(v); PyList_SET_ITEM(o, n, v); Py_DECREF(old); return 1; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_ass_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return -1; PyErr_Clear(); } } return m->sq_ass_item(o, i, v); } } #else #if CYTHON_COMPILING_IN_PYPY if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) { #else if (is_list || PySequence_Check(o)) { #endif return PySequence_SetItem(o, i, v); } #endif return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } 
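    /* Normalize (type, value) so that 'type' is always the exception class
       and 'value' the instance, instantiating the class if necessary; this
       mirrors what CPython's RAISE_VARARGS opcode does for a three-argument
       raise. */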
if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, NULL, 0); } #endif #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* IterFinish */ static CYTHON_INLINE int __Pyx_IterFinish(void) { #if CYTHON_FAST_THREAD_STATE PyThreadState *tstate = PyThreadState_GET(); PyObject* exc_type = tstate->curexc_type; if (unlikely(exc_type)) { if (likely(exc_type == PyExc_StopIteration) || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) { PyObject *exc_value, *exc_tb; exc_value = tstate->curexc_value; exc_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; Py_DECREF(exc_type); Py_XDECREF(exc_value); Py_XDECREF(exc_tb); return 0; } else { return -1; } } return 0; #else if (unlikely(PyErr_Occurred())) { if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { PyErr_Clear(); return 0; } else { return -1; } } return 0; #endif } /* UnpackItemEndCheck */ static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { if (unlikely(retval)) { Py_DECREF(retval); __Pyx_RaiseTooManyValuesError(expected); return -1; } else { return __Pyx_IterFinish(); } return 0; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_COMPILING_IN_CPYTHON #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; return PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, 
PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = 
PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for 
(i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* 
CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if 
(unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return 
(int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to 
int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, 
(((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CIntFromPy */ static CYTHON_INLINE long 
__Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != 
b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = 
-1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_long(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_long, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 2, &__Pyx_TypeInfo_double, stack, &result, obj); if 
(unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_long(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 2, &__Pyx_TypeInfo_long, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) 
|| (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    int is_true = x == Py_True;
    if (is_true | (x == Py_False) | (x == Py_None))
        return is_true;
    else
        return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
    PyNumberMethods *m;
#endif
    const char *name = NULL;
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
#else
    res = PyNumber_Int(x);
#endif
    if (res) {
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            PyErr_Format(PyExc_TypeError,
                "__%.4s__ returned non-%.4s (type %.200s)",
                name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    }
    else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError, "an integer is required");
    }
    return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
    Py_ssize_t ival;
    PyObject *x;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(b))) {
        if (sizeof(Py_ssize_t) >= sizeof(long))
            return PyInt_AS_LONG(b);
        else
            return PyInt_AsSsize_t(b);
    }
#endif
    if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
        const digit* digits = ((PyLongObject*)b)->ob_digit;
        const Py_ssize_t size = Py_SIZE(b);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            ival = likely(size) ? digits[0] : 0;
            if (size == -1) ival = -ival;
            return ival;
        } else {
            switch (size) {
                case 2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case 3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case 4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
            }
        }
#endif
        return PyLong_AsSsize_t(b);
    }
    x = PyNumber_Index(b);
    if (!x) return -1;
    ival = PyInt_AsSsize_t(x);
    Py_DECREF(x);
    return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
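/* Editor's note -- illustrative sketch only, not part of the generated
   module: the __Pyx_div_long and __Pyx_div_Py_ssize_t helpers above
   implement Python-style floor division on top of C truncated division by
   decrementing the quotient when the remainder is nonzero and the operands
   have opposite signs; (r ^ b) < 0 tests the sign bits without a branch.
   The demo below (all demo_* names are hypothetical) verifies the fixup
   and is kept under #if 0 so it cannot affect any build. */
#if 0
#include <assert.h>
static long demo_floor_div_long(long a, long b)
{
    long q = a / b;                     /* C truncates toward zero */
    long r = a - q*b;
    q -= ((r != 0) & ((r ^ b) < 0));    /* correct toward -infinity */
    return q;
}
static void demo_floor_div_check(void)
{
    assert(demo_floor_div_long( 7,  2) ==  3);
    assert(demo_floor_div_long(-7,  2) == -4);  /* plain C division gives -3 */
    assert(demo_floor_div_long( 7, -2) == -4);  /* plain C division gives -3 */
    assert(demo_floor_div_long(-7, -2) ==  3);
}
#endif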
GB_unop__identity_fp64_bool.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp64_bool) // op(A') function: GB (_unop_tran__identity_fp64_bool) // C type: double // A type: bool // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ bool aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp64_bool) ( double *Cx, // Cx and Ax may be aliased const bool *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; bool aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_bool) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
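/* Editor's note -- illustrative sketch only, not part of GraphBLAS: once
   the GB_GETA, GB_CAST, and GB_OP macros are expanded, the dense
   (Ab == NULL) branch of GB (_unop_apply__identity_fp64_bool) reduces to a
   typecasting copy, Cx [p] = (double) Ax [p].  The standalone loop below
   (the demo_* name is hypothetical) shows the same computation without the
   OpenMP and bitmap machinery, and is kept under #if 0 so it cannot affect
   any build. */
#if 0
#include <stdbool.h>
#include <stdint.h>
static void demo_unop_identity_fp64_bool(double *Cx, const bool *Ax,
    int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;         /* GB_GETA: aij = Ax [pA]           */
        double z = (double) aij ;   /* GB_CAST: typecast bool -> double */
        Cx [p] = z ;                /* GB_OP:   identity, cij = aij     */
    }
}
#endif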
GB_unaryop__identity_uint16_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_int32 // op(A') function: GB_tran__identity_uint16_int32 // C type: uint16_t // A type: int32_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_int32 ( uint16_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
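/* Editor's note -- for reference only, not part of GraphBLAS: in the apply
   loop above, each GB_CAST_OP (p, p) expands, via GB_GETA, GB_CASTING, and
   GB_OP, to the three statements below: a load, a plain C typecast from
   int32_t to uint16_t (modular, not saturating), and a store.  Kept under
   #if 0 so it cannot affect any build. */
#if 0
    {
        int32_t aij = Ax [p] ;          /* GB_GETA (aij, Ax, pA)  */
        uint16_t x = (uint16_t) aij ;   /* GB_CASTING (x, aij)    */
        Cx [p] = x ;                    /* GB_OP (GB_CX (pC), x)  */
    }
#endif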
GB.h
//------------------------------------------------------------------------------ // GB.h: definitions visible only inside GraphBLAS //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // These definitions are not visible to the user. They are used only inside // GraphBLAS itself. // Future plans: (see also 'grep -r FUTURE') // FUTURE: support for dense matrices (A->i and A->p as NULL pointers) // FUTURE: implement v1.3 of the API // FUTURE: add matrix I/O in binary format (see draft LAGraph_binread/binwrite) // FUTURE: add Heap method to GB_AxB_saxpy3 (inspector-executor style) // FUTURE: allow matrices and vectors to be left jumbled (sort left pending) #ifndef GB_H #define GB_H //------------------------------------------------------------------------------ // code development settings //------------------------------------------------------------------------------ // to turn on Debug for a single file of GraphBLAS, add: // #define GB_DEBUG // just before the statement: // #include "GB.h" // set GB_BURBLE to 1 to enable extensive diagnostic output, or compile with // -DGB_BURBLE=1. This setting can also be added at the top of any individual // Source/* files, before #including any other files. #ifndef GB_BURBLE #define GB_BURBLE 0 #endif // to turn on Debug for all of GraphBLAS, uncomment this line: // #define GB_DEBUG // to reduce code size and for faster time to compile, uncomment this line; // GraphBLAS will be slower. Alternatively, use cmake with -DGBCOMPACT=1 // #define GBCOMPACT 1 // for code development only // #define GB_DEVELOPER 1 // set these via cmake, or uncomment to select the user-thread model: // #define USER_POSIX_THREADS // #define USER_OPENMP_THREADS // #define USER_NO_THREADS //------------------------------------------------------------------------------ // manage compiler warnings //------------------------------------------------------------------------------ #if defined __INTEL_COMPILER // 10397: remark about where *.optrpt reports are placed // 15552: loop not vectorized #pragma warning (disable: 10397 15552 ) // disable icc -w2 warnings // 191: type qualifier meaningless // 193: zero used for undefined #define // 589: bypass initialization #pragma warning (disable: 191 193 ) // disable icc -w3 warnings // 144: initialize with incompatible pointer // 181: format // 869: unused parameters // 1572: floating point comparisons // 1599: shadow // 2259: typecasting may lose bits // 2282: unrecognized pragma // 2557: sign compare #pragma warning (disable: 144 181 869 1572 1599 2259 2282 2557 ) // See GB_unused.h for warnings 177 and 593, which are not globally // disabled, but selectively by #include'ing GB_unused.h as needed.
// resolved (warnings no longer disabled globally): // 58: sign compare // 167: incompatible pointer // 177: declared but unused // 186: useless comparison // 188: mixing enum types // 593: set but not used // 981: unspecified order // 1418: no external declaration // 1419: external declaration in source file // 2330: const incompatible // 2547: remark about include files // 3280: shadow #elif defined __GNUC__ // disable warnings for gcc 5.x and higher: #if (__GNUC__ > 4) // disable warnings // #pragma GCC diagnostic ignored "-Wunknown-warning-option" #pragma GCC diagnostic ignored "-Wint-in-bool-context" #pragma GCC diagnostic ignored "-Wformat-truncation=" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" // enable these warnings as errors #pragma GCC diagnostic error "-Wmisleading-indentation" #endif // disable warnings from -Wall -Wextra -Wpedantic #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wsign-compare" #if defined ( __cplusplus ) #pragma GCC diagnostic ignored "-Wwrite-strings" #else #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" #endif // See GB_unused.h, where these two pragmas are used: // #pragma GCC diagnostic ignored "-Wunused-but-set-variable" // #pragma GCC diagnostic ignored "-Wunused-variable" // resolved (warnings no longer disabled globally): // #pragma GCC diagnostic ignored "-Wunknown-pragmas" // #pragma GCC diagnostic ignored "-Wtype-limits" // #pragma GCC diagnostic ignored "-Wunused-result" // #pragma GCC diagnostic ignored "-Wdiscarded-qualifiers" // enable these warnings as errors #pragma GCC diagnostic error "-Wswitch-default" #if !defined ( __cplusplus ) #pragma GCC diagnostic error "-Wmissing-prototypes" #endif // #pragma GCC diagnostic error "-Wdouble-promotion" #endif #if ( _MSC_VER && !__INTEL_COMPILER ) // disable MS Visual Studio warnings #pragma warning(disable:4146) #endif //------------------------------------------------------------------------------ // include GraphBLAS.h (depends on user threading model) //------------------------------------------------------------------------------ #ifndef MATLAB_MEX_FILE #define GB_LIBRARY #endif #include "GraphBLAS.h" //------------------------------------------------------------------------------ // compiler variations //------------------------------------------------------------------------------ // Determine the restrict keyword, and whether or not variable-length arrays // are supported. #if ( _MSC_VER && !__INTEL_COMPILER ) // Microsoft Visual Studio does not have the restrict keyword, but it does // support __restrict, which is equivalent. Variable-length arrays are // not supported. OpenMP tasks are not available. #define GB_MICROSOFT 1 #define GB_RESTRICT __restrict #define GB_HAS_VLA 0 #define GB_HAS_OPENMP_TASKS 0 #elif GxB_STDC_VERSION >= 199901L // ANSI C99 and later have the restrict keyword and variable-length arrays.
#define GB_MICROSOFT 0 #define GB_RESTRICT restrict #define GB_HAS_VLA 1 #define GB_HAS_OPENMP_TASKS 1 #else // ANSI C95 and earlier have neither #define GB_MICROSOFT 0 #define GB_RESTRICT #define GB_HAS_VLA 0 #define GB_HAS_OPENMP_TASKS 1 #endif //------------------------------------------------------------------------------ // Microsoft specific include files //------------------------------------------------------------------------------ #if GB_MICROSOFT #include <malloc.h> #endif //------------------------------------------------------------------------------ // OpenMP pragmas and tasks //------------------------------------------------------------------------------ // GB_PRAGMA(x) becomes "#pragma x", but the way to do this depends on the // compiler: #if GB_MICROSOFT // MS Visual Studio is not ANSI C11 compliant, and uses __pragma: #define GB_PRAGMA(x) __pragma (x) #else // ANSI C11 compilers use _Pragma: #define GB_PRAGMA(x) _Pragma (#x) #endif // construct pragmas for loop vectorization: #if GB_MICROSOFT // no #pragma omp simd is available in MS Visual Studio #define GB_PRAGMA_SIMD #define GB_PRAGMA_SIMD_REDUCTION(op,s) #else // create two kinds of SIMD pragmas: // GB_PRAGMA_SIMD becomes "#pragma omp simd" // GB_PRAGMA_SIMD_REDUCTION (+,cij) becomes // "#pragma omp simd reduction(+:cij)" #define GB_PRAGMA_SIMD GB_PRAGMA (omp simd) #define GB_PRAGMA_SIMD_REDUCTION(op,s) GB_PRAGMA (omp simd reduction(op:s)) #endif // construct pragmas for OpenMP tasks, if available: #if GB_HAS_OPENMP_TASKS // Use OpenMP tasks #define GB_TASK(func, ...) \ GB_PRAGMA(omp task firstprivate(__VA_ARGS__)) \ func (__VA_ARGS__) #define GB_TASK_WAIT GB_PRAGMA (omp taskwait) #define GB_TASK_MASTER(nthreads) \ GB_PRAGMA (omp parallel num_threads (nthreads)) \ GB_PRAGMA (omp master) #else // OpenMP tasks not available #define GB_TASK(func, ...) func (__VA_ARGS__) #define GB_TASK_WAIT #define GB_TASK_MASTER(nthreads) #endif #define GB_PRAGMA_IVDEP GB_PRAGMA(ivdep) //------------------------------------------------------------------------------ // PGI_COMPILER_BUG //------------------------------------------------------------------------------ // If GraphBLAS is compiled with -DPGI_COMPILER_BUG, then a workaround is // enabled for a bug in the PGI compiler. The compiler does not correctly // handle automatic arrays of variable size. #ifdef PGI_COMPILER_BUG // override the ANSI C compiler to turn off variable-length arrays #undef GB_HAS_VLA #define GB_HAS_VLA 0 #endif //------------------------------------------------------------------------------ // variable-length arrays //------------------------------------------------------------------------------ // If variable-length arrays are not supported, user-defined types are limited // in size to 128 bytes or less. Many of the type-generic routines allocate // workspace for a single scalar of variable size, using a statement: // // GB_void aij [xsize] ; // // To support non-variable-length arrays in ANSI C95 or earlier, this is used: // // GB_void aij [GB_VLA(xsize)] ; // // GB_VLA(xsize) is either defined as xsize (for ANSI C99 or later), or a fixed // size of 128, in which case user-defined types are limited to a max of 128 // bytes. 
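// As a concrete illustration of the rule above (editorial note; the value
// xsize = 32 is hypothetical): with a 32-byte user-defined type,
//
//      GB_void aij [GB_VLA (xsize)] ;
//
// declares a 32-byte variable-length array when GB_HAS_VLA is 1, and a fixed
// 128-byte buffer otherwise; in both cases the workspace is large enough to
// hold one scalar of the type.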
typedef unsigned char GB_void ; #if ( GB_HAS_VLA ) // variable-length arrays are allowed #define GB_VLA(s) s #else // variable-length arrays are not allowed #define GB_VLA_MAXSIZE 128 #define GB_VLA(s) GB_VLA_MAXSIZE #endif //------------------------------------------------------------------------------ // for coverage tests in Tcov/ //------------------------------------------------------------------------------ #ifdef GBCOVER #define GBCOVER_MAX 20000 GB_PUBLIC int64_t GB_cov [GBCOVER_MAX] ; GB_PUBLIC int GB_cover_max ; #endif //------------------------------------------------------------------------------ // GraphBLAS include files //------------------------------------------------------------------------------ #include "GB_cplusplus.h" #include "GB_Global.h" #include "GB_printf.h" #include "GB_assert.h" #include "GB_opaque.h" #include "GB_casting.h" #include "GB_math.h" #include "GB_bitwise.h" #include "GB_wait.h" #include "GB_binary_search.h" //------------------------------------------------------------------------------ // default options //------------------------------------------------------------------------------ // These parameters define the default values of the options that can be // used as inputs to GxB_*Option_set. // The default format is by row (CSR), with a hyper_ratio of 1/16. // In Versions 2.1 and earlier, the default was GxB_BY_COL (CSC format). #define GB_HYPER_DEFAULT (0.0625) // compile SuiteSparse:GraphBLAS with "-DBYCOL" to make GxB_BY_COL the default // format #ifdef BYCOL #define GB_FORMAT_DEFAULT GxB_BY_COL #else #define GB_FORMAT_DEFAULT GxB_BY_ROW #endif // these parameters define the hyper_ratio needed to ensure a matrix stays // either always hypersparse, or never hypersparse. #define GB_ALWAYS_HYPER (1.0) #define GB_NEVER_HYPER (-1.0) #define GB_FORCE_HYPER 1 #define GB_FORCE_NONHYPER 0 #define GB_AUTO_HYPER (-1) #define GB_SAME_HYPER_AS(A_is_hyper) \ ((A_is_hyper) ? GB_FORCE_HYPER : GB_FORCE_NONHYPER) // if A is hypersparse but all vectors are present, then // treat A as if it were non-hypersparse #define GB_IS_HYPER(A) \ (((A) != NULL) && ((A)->is_hyper && ((A)->nvec < (A)->vdim))) //------------------------------------------------------------------------------ // macros for matrices and vectors //------------------------------------------------------------------------------ // If A->nzmax is zero, then A->p might not be allocated. Note that this // macro does not count pending tuples; use GB_MATRIX_WAIT(A) first, if // needed. For a sparse or hypersparse matrix, Ap [0] == 0. For a slice or // hyperslice, Ap [0] >= 0 points to the first entry in the slice. For all 4 // cases (sparse, hypersparse, slice, hyperslice), nnz(A) = Ap [nvec] - Ap [0]. #define GB_NNZ(A) (((A)->nzmax > 0) ? ((A)->p [(A)->nvec] - (A)->p [0]) : 0 ) // Upper bound on nnz(A) when the matrix has zombies and pending tuples; // does not need GB_MATRIX_WAIT(A) first. #define GB_NNZ_UPPER_BOUND(A) ((GB_NNZ (A) - A->nzombies) + GB_Pending_n (A)) int64_t GB_Pending_n // return # of pending tuples in A ( GrB_Matrix A ) ; // A is nrows-by-ncols, in either CSR or CSC format #define GB_NROWS(A) ((A)->is_csc ? (A)->vlen : (A)->vdim) #define GB_NCOLS(A) ((A)->is_csc ? (A)->vdim : (A)->vlen) // The internal content of a GrB_Matrix and GrB_Vector are identical, and // inside SuiteSparse:GraphBLAS, they can be typecasted between each other. // This typecasting feature should not be done in user code, however, since it // is not supported in the API.
All GrB_Vector objects can be safely // typecasted into a GrB_Matrix, but not the other way around. The GrB_Vector // object is more restrictive. The GB_VECTOR_OK(v) macro defines the content // that all GrB_Vector objects must have. // GB_VECTOR_OK(v) is used mainly for assertions, but also to determine when it // is safe to typecast an n-by-1 GrB_Matrix (in standard CSC format) into a // GrB_Vector. This is not done in the main SuiteSparse:GraphBLAS library, but // in the GraphBLAS/Test directory only. The macro is also used in // GB_Vector_check, to ensure the content of a GrB_Vector is valid. #define GB_VECTOR_OK(v) \ ( \ ((v) != NULL) && \ ((v)->is_hyper == false) && \ ((v)->is_csc == true) && \ ((v)->plen == 1) && \ ((v)->vdim == 1) && \ ((v)->nvec == 1) && \ ((v)->h == NULL) \ ) // A GxB_Scalar is a GrB_Vector of length 1 #define GB_SCALAR_OK(v) (GB_VECTOR_OK(v) && ((v)->vlen == 1)) //------------------------------------------------------------------------------ // aliased objects //------------------------------------------------------------------------------ // GraphBLAS allows all inputs to all user-accessible objects to be aliased, as // in GrB_mxm (C, C, accum, C, C, ...), which is valid. Internal routines are // more restrictive. // GB_aliased also checks the content of A and B GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only bool GB_aliased // determine if A and B are aliased ( GrB_Matrix A, // input A matrix GrB_Matrix B // input B matrix ) ; //------------------------------------------------------------------------------ // internal GraphBLAS type and operator codes //------------------------------------------------------------------------------ // GB_MAGIC is an arbitrary number that is placed inside each object when it is // initialized, as a way of detecting uninitialized objects. #define GB_MAGIC 0x72657473786f62ULL // The magic number is set to GB_FREED when the object is freed, as a way of // helping to detect dangling pointers. #define GB_FREED 0x6c6c756e786f62ULL // The value is set to GB_MAGIC2 when the object has been allocated but cannot // yet be used in most methods and operations. Currently this is used only for // when A->p array is allocated but not initialized. #define GB_MAGIC2 0x7265745f786f62ULL // predefined type objects GB_PUBLIC struct GB_Type_opaque GB_opaque_GrB_BOOL , // GrB_BOOL is a pointer to this object, etc.
GB_opaque_GrB_INT8 , GB_opaque_GrB_UINT8 , GB_opaque_GrB_INT16 , GB_opaque_GrB_UINT16 , GB_opaque_GrB_INT32 , GB_opaque_GrB_UINT32 , GB_opaque_GrB_INT64 , GB_opaque_GrB_UINT64 , GB_opaque_GrB_FP32 , GB_opaque_GrB_FP64 , GB_opaque_GxB_FC32 , GB_opaque_GxB_FC64 ; //------------------------------------------------------------------------------ // monoid structs //------------------------------------------------------------------------------ GB_PUBLIC struct GB_Monoid_opaque // MIN monoids: GB_opaque_GxB_MIN_INT8_MONOID, // identity: INT8_MAX GB_opaque_GxB_MIN_UINT8_MONOID, // identity: UINT8_MAX GB_opaque_GxB_MIN_INT16_MONOID, // identity: INT16_MAX GB_opaque_GxB_MIN_UINT16_MONOID, // identity: UINT16_MAX GB_opaque_GxB_MIN_INT32_MONOID, // identity: INT32_MAX GB_opaque_GxB_MIN_UINT32_MONOID, // identity: UINT32_MAX GB_opaque_GxB_MIN_INT64_MONOID, // identity: INT64_MAX GB_opaque_GxB_MIN_UINT64_MONOID, // identity: UINT64_MAX GB_opaque_GxB_MIN_FP32_MONOID, // identity: INFINITY GB_opaque_GxB_MIN_FP64_MONOID, // identity: INFINITY // MAX monoids: GB_opaque_GxB_MAX_INT8_MONOID, // identity: INT8_MIN GB_opaque_GxB_MAX_UINT8_MONOID, // identity: 0 GB_opaque_GxB_MAX_INT16_MONOID, // identity: INT16_MIN GB_opaque_GxB_MAX_UINT16_MONOID, // identity: 0 GB_opaque_GxB_MAX_INT32_MONOID, // identity: INT32_MIN GB_opaque_GxB_MAX_UINT32_MONOID, // identity: 0 GB_opaque_GxB_MAX_INT64_MONOID, // identity: INT64_MIN GB_opaque_GxB_MAX_UINT64_MONOID, // identity: 0 GB_opaque_GxB_MAX_FP32_MONOID, // identity: -INFINITY GB_opaque_GxB_MAX_FP64_MONOID, // identity: -INFINITY // PLUS monoids: GB_opaque_GxB_PLUS_INT8_MONOID, // identity: 0 GB_opaque_GxB_PLUS_UINT8_MONOID, // identity: 0 GB_opaque_GxB_PLUS_INT16_MONOID, // identity: 0 GB_opaque_GxB_PLUS_UINT16_MONOID, // identity: 0 GB_opaque_GxB_PLUS_INT32_MONOID, // identity: 0 GB_opaque_GxB_PLUS_UINT32_MONOID, // identity: 0 GB_opaque_GxB_PLUS_INT64_MONOID, // identity: 0 GB_opaque_GxB_PLUS_UINT64_MONOID, // identity: 0 GB_opaque_GxB_PLUS_FP32_MONOID, // identity: 0 GB_opaque_GxB_PLUS_FP64_MONOID, // identity: 0 GB_opaque_GxB_PLUS_FC32_MONOID, // identity: 0 GB_opaque_GxB_PLUS_FC64_MONOID, // identity: 0 // TIMES monoids: GB_opaque_GxB_TIMES_INT8_MONOID, // identity: 1 GB_opaque_GxB_TIMES_UINT8_MONOID, // identity: 1 GB_opaque_GxB_TIMES_INT16_MONOID, // identity: 1 GB_opaque_GxB_TIMES_UINT16_MONOID, // identity: 1 GB_opaque_GxB_TIMES_INT32_MONOID, // identity: 1 GB_opaque_GxB_TIMES_UINT32_MONOID, // identity: 1 GB_opaque_GxB_TIMES_INT64_MONOID, // identity: 1 GB_opaque_GxB_TIMES_UINT64_MONOID, // identity: 1 GB_opaque_GxB_TIMES_FP32_MONOID, // identity: 1 GB_opaque_GxB_TIMES_FP64_MONOID, // identity: 1 GB_opaque_GxB_TIMES_FC32_MONOID, // identity: 1 GB_opaque_GxB_TIMES_FC64_MONOID, // identity: 1 // ANY monoids: GB_opaque_GxB_ANY_INT8_MONOID, GB_opaque_GxB_ANY_UINT8_MONOID, GB_opaque_GxB_ANY_INT16_MONOID, GB_opaque_GxB_ANY_UINT16_MONOID, GB_opaque_GxB_ANY_INT32_MONOID, GB_opaque_GxB_ANY_UINT32_MONOID, GB_opaque_GxB_ANY_INT64_MONOID, GB_opaque_GxB_ANY_UINT64_MONOID, GB_opaque_GxB_ANY_FP32_MONOID, GB_opaque_GxB_ANY_FP64_MONOID, GB_opaque_GxB_ANY_FC32_MONOID, GB_opaque_GxB_ANY_FC64_MONOID, // Boolean monoids: GB_opaque_GxB_ANY_BOOL_MONOID, GB_opaque_GxB_LOR_BOOL_MONOID, // identity: false GB_opaque_GxB_LAND_BOOL_MONOID, // identity: true GB_opaque_GxB_LXOR_BOOL_MONOID, // identity: false GB_opaque_GxB_EQ_BOOL_MONOID, // identity: true // BOR monoids: (bitwise OR) GB_opaque_GxB_BOR_UINT8_MONOID, GB_opaque_GxB_BOR_UINT16_MONOID, GB_opaque_GxB_BOR_UINT32_MONOID, 
GB_opaque_GxB_BOR_UINT64_MONOID, // BAND monoids: (bitwise and) GB_opaque_GxB_BAND_UINT8_MONOID, GB_opaque_GxB_BAND_UINT16_MONOID, GB_opaque_GxB_BAND_UINT32_MONOID, GB_opaque_GxB_BAND_UINT64_MONOID, // BXOR monoids: (bitwise xor) GB_opaque_GxB_BXOR_UINT8_MONOID, GB_opaque_GxB_BXOR_UINT16_MONOID, GB_opaque_GxB_BXOR_UINT32_MONOID, GB_opaque_GxB_BXOR_UINT64_MONOID, // BXNOR monoids: (bitwise xnor) GB_opaque_GxB_BXNOR_UINT8_MONOID, GB_opaque_GxB_BXNOR_UINT16_MONOID, GB_opaque_GxB_BXNOR_UINT32_MONOID, GB_opaque_GxB_BXNOR_UINT64_MONOID ; //------------------------------------------------------------------------------ // select structs //------------------------------------------------------------------------------ GB_PUBLIC struct GB_SelectOp_opaque GB_opaque_GxB_TRIL, GB_opaque_GxB_TRIU, GB_opaque_GxB_DIAG, GB_opaque_GxB_OFFDIAG, GB_opaque_GxB_NONZERO, GB_opaque_GxB_EQ_ZERO, GB_opaque_GxB_GT_ZERO, GB_opaque_GxB_GE_ZERO, GB_opaque_GxB_LT_ZERO, GB_opaque_GxB_LE_ZERO, GB_opaque_GxB_NE_THUNK, GB_opaque_GxB_EQ_THUNK, GB_opaque_GxB_GT_THUNK, GB_opaque_GxB_GE_THUNK, GB_opaque_GxB_LT_THUNK, GB_opaque_GxB_LE_THUNK ; //------------------------------------------------------------------------------ // error logging and parallel thread control //------------------------------------------------------------------------------ // Error messages are logged in Context->details, of size GB_DLEN, on the // stack, and then copied into thread-local storage of size GB_RLEN. If the // user-defined data types, operators, etc. have really long names, the error // messages are safely truncated (via snprintf). This is intentional, but gcc // with // -Wformat-truncation will print a warning (see pragmas above). Ignore the // warning. // The Context also contains the number of threads to use in the operation. It // is normally determined from the user's descriptor, with a default of // nthreads_max = GxB_DEFAULT (that is, zero). The default rule is to let // GraphBLAS determine the number of threads automatically by selecting a // number of threads between 1 and nthreads_max. GrB_init initializes // nthreads_max to omp_get_max_threads. Both the global value and the value in // a descriptor can be set/queried by GxB_set / GxB_get. // Some GrB_Matrix and GrB_Vector methods do not take a descriptor, however // (GrB_*_dup, _build, _exportTuples, _clear, _nvals, _wait, and GxB_*_resize). // For those methods the default rule is always used (nthreads_max = // GxB_DEFAULT), which then relies on the global nthreads_max. #define GB_RLEN 384 #define GB_DLEN 256 typedef struct { double chunk ; // chunk size for small problems int nthreads_max ; // max # of threads to use const char *where ; // GraphBLAS function where error occurred char details [GB_DLEN] ; // error report bool use_mkl ; // control usage of Intel MKL } GB_Context_struct ; typedef GB_Context_struct *GB_Context ; // GB_WHERE keeps track of the currently running user-callable function. // User-callable functions in this implementation are written so that they do // not call other unrelated user-callable functions (except for GrB_*free). // Related user-callable functions can call each other since they all report // the same type-generic name. Internal functions can be called by many // different user-callable functions, directly or indirectly. It would not be // helpful to report the name of an internal function that flagged an error // condition. Thus, each time a user-callable function is entered (except // GrB_*free), it logs the name of the function with the GB_WHERE macro.
// GrB_*free does not encounter error conditions so it doesn't need to be // logged by the GB_WHERE macro. #ifndef GB_PANIC #define GB_PANIC return (GrB_PANIC) #endif #define GB_CONTEXT(where_string) \ /* construct the Context */ \ GB_Context_struct Context_struct ; \ GB_Context Context = &Context_struct ; \ /* set Context->where so GrB_error can report it if needed */ \ Context->where = where_string ; \ /* get the default max # of threads and default chunk size */ \ Context->nthreads_max = GB_Global_nthreads_max_get ( ) ; \ Context->chunk = GB_Global_chunk_get ( ) ; \ Context->use_mkl = GB_Global_use_mkl_get ( ) #define GB_WHERE(where_string) \ if (!GB_Global_GrB_init_called_get ( )) \ { \ /* GrB_init (or GxB_init) has not been called! */ \ GB_PANIC ; \ } \ GB_CONTEXT (where_string) //------------------------------------------------------------------------------ // GB_GET_NTHREADS_MAX: determine max # of threads for OpenMP parallelism. //------------------------------------------------------------------------------ // GB_GET_NTHREADS_MAX obtains the max # of threads to use and the chunk // size from the Context. If Context is NULL then a single thread *must* // be used. If Context->nthreads_max is <= GxB_DEFAULT, then select // automatically: between 1 and nthreads_max, depending on the problem // size. Below is the default rule. Any function can use its own rule // instead, based on Context, chunk, nthreads_max, and the problem size. // No rule can exceed nthreads_max. #define GB_GET_NTHREADS_MAX(nthreads_max,chunk,Context) \ int nthreads_max = (Context == NULL) ? 1 : Context->nthreads_max ; \ if (nthreads_max <= GxB_DEFAULT) \ { \ nthreads_max = GB_Global_nthreads_max_get ( ) ; \ } \ double chunk = (Context == NULL) ? GxB_DEFAULT : Context->chunk ; \ if (chunk <= GxB_DEFAULT) \ { \ chunk = GB_Global_chunk_get ( ) ; \ } //------------------------------------------------------------------------------ // GB_nthreads: determine # of threads to use for a parallel loop or region //------------------------------------------------------------------------------ // If work < 2*chunk, then only one thread is used. // else if work < 3*chunk, then two threads are used, and so on. static inline int GB_nthreads // return # of threads to use ( double work, // total work to do double chunk, // give each thread at least this much work int nthreads_max // max # of threads to use ) { work = GB_IMAX (work, 1) ; chunk = GB_IMAX (chunk, 1) ; int64_t nthreads = (int64_t) floor (work / chunk) ; nthreads = GB_IMIN (nthreads, nthreads_max) ; nthreads = GB_IMAX (nthreads, 1) ; return ((int) nthreads) ; } //------------------------------------------------------------------------------ // error logging //------------------------------------------------------------------------------ // The GB_ERROR and GB_LOG macros work together. If an error occurs, the // GB_ERROR macro records the details in the Context.details, and returns the // GrB_info to its 'caller'. This value can then be returned, or set to an // info variable of type GrB_Info. For example: // // if (i >= nrows) // { // return (GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, (GB_LOG, // "Row index %d out of bounds; must be < %d", i, nrows))) ; // } // // The user can then do: // // printf ("%s", GrB_error ( )) ; // // To print details of the error, which includes: which user-callable function // encountered the error, the error status (GrB_INDEX_OUT_OF_BOUNDS), the // details ("Row index 102 out of bounds, must be < 100"). 
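//------------------------------------------------------------------------------
// thread-count selection: a worked example (editorial note)
//------------------------------------------------------------------------------

// A sketch of the usual pattern built from GB_GET_NTHREADS_MAX and
// GB_nthreads above; the amount of work is hypothetical.
//
//      GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//      double work = 1000000 ;     // e.g., one unit of work per entry
//      int nthreads = GB_nthreads (work, chunk, nthreads_max) ;
//
// With chunk = 64*1024 and nthreads_max = 8, floor (1000000/65536) is 15,
// so nthreads = min (15, 8) = 8; a smaller problem gets fewer threads.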
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only const char *GB_status_code (GrB_Info info) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_error // log an error in thread-local-storage ( GrB_Info info, // error return code from a GraphBLAS function GB_Context Context // pointer to a Context struct, on the stack ) ; // GB_LOG becomes the snprintf_args for GB_ERROR. Unused if Context is NULL. #define GB_LOG Context->details, GB_DLEN // if Context is NULL, do not log the error string in Context->details #define GB_ERROR(info,snprintf_args) \ ( \ ((Context == NULL) ? 0 : snprintf snprintf_args), \ GB_error (info, Context) \ ) // return (GB_OUT_OF_MEMORY) ; reports an out-of-memory error #define GB_OUT_OF_MEMORY GB_ERROR (GrB_OUT_OF_MEMORY, (GB_LOG, "out of memory")) //------------------------------------------------------------------------------ // GraphBLAS check functions: check and optionally print an object //------------------------------------------------------------------------------ // pr values for *_check functions #define GB0 GxB_SILENT #define GB1 GxB_SUMMARY #define GB2 GxB_SHORT #define GB3 GxB_COMPLETE #define GB4 GxB_SHORT_VERBOSE #define GB5 GxB_COMPLETE_VERBOSE // a NULL name is treated as the empty string #define GB_NAME ((name != NULL) ? name : "") GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_entry_check // print a single value ( const GrB_Type type, // type of value to print const void *x, // value to print int pr, // print level FILE *f, // file to print to GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_code_check // print and check an entry using a type code ( const GB_Type_code code, // type code of value to print const void *x, // entry to print int pr, // print level FILE *f, // file to print to GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Type_check // check a GraphBLAS Type ( const GrB_Type type, // GraphBLAS type to print and check const char *name, // name of the type from the caller; optional int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_BinaryOp_check // check a GraphBLAS binary operator ( const GrB_BinaryOp op, // GraphBLAS operator to print and check const char *name, // name of the operator int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_UnaryOp_check // check a GraphBLAS unary operator ( const GrB_UnaryOp op, // GraphBLAS operator to print and check const char *name, // name of the operator int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_SelectOp_check // check a GraphBLAS select operator ( const GxB_SelectOp op, // GraphBLAS operator to print and check const char *name, // name of the operator int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Monoid_check // check a GraphBLAS monoid ( const GrB_Monoid monoid, // GraphBLAS monoid to print and check const char *name, // name of the monoid, optional int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Semiring_check // check 
a GraphBLAS semiring ( const GrB_Semiring semiring, // GraphBLAS semiring to print and check const char *name, // name of the semiring, optional int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Descriptor_check // check a GraphBLAS descriptor ( const GrB_Descriptor D, // GraphBLAS descriptor to print and check const char *name, // name of the descriptor, optional int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_matvec_check // check a GraphBLAS matrix or vector ( const GrB_Matrix A, // GraphBLAS matrix to print and check const char *name, // name of the matrix, optional int pr, // print level; if negative, ignore nzombie // conditions and use GB_FLIP(pr) for diagnostics FILE *f, // file for output const char *kind, // "matrix" or "vector" GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Matrix_check // check a GraphBLAS matrix ( const GrB_Matrix A, // GraphBLAS matrix to print and check const char *name, // name of the matrix int pr, // print level FILE *f, // file for output GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Vector_check // check a GraphBLAS vector ( const GrB_Vector v, // GraphBLAS vector to print and check const char *name, // name of the vector int pr, // print level FILE *f, // file for output GB_Context Context ) ; GrB_Info GB_Scalar_check // check a GraphBLAS GxB_Scalar ( const GxB_Scalar v, // GraphBLAS GxB_Scalar to print and check const char *name, // name of the GxB_Scalar int pr, // print level FILE *f, // file for output GB_Context Context ) ; //------------------------------------------------------------------------------ // internal GraphBLAS functions //------------------------------------------------------------------------------ GrB_Info GB_init // start up GraphBLAS ( const GrB_Mode mode, // blocking or non-blocking mode // pointers to memory management functions. Must be non-NULL. void * (* malloc_function ) (size_t), void * (* calloc_function ) (size_t, size_t), void * (* realloc_function ) (void *, size_t), void (* free_function ) (void *), bool malloc_is_thread_safe, bool caller_is_GxB_cuda_init, // true for GxB_cuda_init only GB_Context Context // from GrB_init or GxB_init ) ; typedef enum // input parameter to GB_new and GB_create { GB_Ap_calloc, // 0: calloc A->p, malloc A->h if hypersparse GB_Ap_malloc, // 1: malloc A->p, malloc A->h if hypersparse GB_Ap_null // 2: do not allocate A->p or A->h } GB_Ap_code ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_new // create matrix, except for indices & values ( GrB_Matrix *Ahandle, // handle of matrix to create const GrB_Type type, // matrix type const int64_t vlen, // length of each vector const int64_t vdim, // number of vectors const GB_Ap_code Ap_option, // allocate A->p and A->h, or leave NULL const bool is_csc, // true if CSC, false if CSR const int hyper_option, // 1:hyper, 0:nonhyper, -1:auto const double hyper_ratio, // A->hyper_ratio, unless auto const int64_t plen, // size of A->p and A->h, if A hypersparse. // Ignored if A is not hypersparse. 
GB_Context Context ) ; GrB_Info GB_create // create a new matrix, including A->i and A->x ( GrB_Matrix *Ahandle, // output matrix to create const GrB_Type type, // type of output matrix const int64_t vlen, // length of each vector const int64_t vdim, // number of vectors const GB_Ap_code Ap_option, // allocate A->p and A->h, or leave NULL const bool is_csc, // true if CSC, false if CSR const int hyper_option, // 1:hyper, 0:nonhyper, -1:auto const double hyper_ratio, // A->hyper_ratio, unless auto const int64_t plen, // size of A->p and A->h, if hypersparse const int64_t anz, // number of nonzeros the matrix must hold const bool numeric, // if true, allocate A->x, else A->x is NULL GB_Context Context ) ; GrB_Info GB_hyper_realloc ( GrB_Matrix A, // matrix with hyperlist to reallocate int64_t plen_new, // new size of A->p and A->h GB_Context Context ) ; GrB_Info GB_clear // clear a matrix, type and dimensions unchanged ( GrB_Matrix A, // matrix to clear GB_Context Context ) ; GrB_Info GB_dup // make an exact copy of a matrix ( GrB_Matrix *Chandle, // handle of output matrix to create const GrB_Matrix A, // input matrix to copy const bool numeric, // if true, duplicate the numeric values const GrB_Type ctype, // type of C, if numeric is false GB_Context Context ) ; GrB_Info GB_dup2 // make an exact copy of a matrix ( GrB_Matrix *Chandle, // handle of output matrix to create const GrB_Matrix A, // input matrix to copy const bool numeric, // if true, duplicate the numeric values const GrB_Type ctype, // type of C, if numeric is false GB_Context Context ) ; void GB_memcpy // parallel memcpy ( void *dest, // destination const void *src, // source size_t n, // # of bytes to copy int nthreads // # of threads to use ) ; GrB_Info GB_nvals // get the number of entries in a matrix ( GrB_Index *nvals, // matrix has nvals entries const GrB_Matrix A, // matrix to query GB_Context Context ) ; GrB_Info GB_matvec_type // get the type of a matrix ( GrB_Type *type, // returns the type of the matrix const GrB_Matrix A, // matrix to query GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_ix_alloc // allocate A->i and A->x space in a matrix ( GrB_Matrix A, // matrix to allocate space for const GrB_Index nzmax, // number of entries the matrix can hold const bool numeric, // if true, allocate A->x, otherwise A->x is NULL GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_ix_realloc // reallocate space in a matrix ( GrB_Matrix A, // matrix to allocate space for const GrB_Index nzmax, // new number of entries the matrix can hold const bool numeric, // if true, reallocate A->x, otherwise A->x is NULL GB_Context Context ) ; GrB_Info GB_ix_resize // resize a matrix ( GrB_Matrix A, const int64_t anz_new, // required new nnz(A) GB_Context Context ) ; // free A->i and A->x and return if critical section fails #define GB_IX_FREE(A) \ if (GB_ix_free (A) == GrB_PANIC) GB_PANIC GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_ix_free // free A->i and A->x of a matrix ( GrB_Matrix A // matrix with content to free ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_ph_free // free A->p and A->h of a matrix ( GrB_Matrix A // matrix with content to free ) ; // free all content, and return if critical section fails #define GB_PHIX_FREE(A) \ if (GB_phix_free (A) == GrB_PANIC) GB_PANIC GrB_Info GB_phix_free // free all content of a matrix ( GrB_Matrix A // matrix with content to free 
) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only bool GB_Type_compatible // check if two types can be typecast ( const GrB_Type atype, const GrB_Type btype ) ; //------------------------------------------------------------------------------ // GB_code_compatible: return true if domains are compatible //------------------------------------------------------------------------------ // Two domains are compatible for typecasting between them if both are built-in // types (of any kind) or if both are the same user-defined type. This // function does not have the type itself, but just the code. If the types are // available, GB_Type_compatible should be called instead. static inline bool GB_code_compatible // true if two types can be typecast ( const GB_Type_code acode, // type code of a const GB_Type_code bcode // type code of b ) { bool a_user = (acode == GB_UDT_code) ; bool b_user = (bcode == GB_UDT_code) ; if (a_user || b_user) { // both a and b must be user-defined. They should be the same // user-defined type, but the caller does not have the actual type, // just the code. return (a_user && b_user) ; } else { // any built-in domain is compatible with any other built-in domain return (true) ; } } //------------------------------------------------------------------------------ // GB_task_struct: parallel task descriptor //------------------------------------------------------------------------------ // The element-wise computations (GB_add, GB_emult, and GB_mask) compute // C(:,j)<M(:,j)> = op (A (:,j), B(:,j)). They are parallelized by slicing the // work into tasks, described by the GB_task_struct. // There are two kinds of tasks. For a coarse task, kfirst <= klast, and the // task computes all vectors in C(:,kfirst:klast), inclusive. None of the // vectors are sliced and computed by other tasks. For a fine task, klast is // -1. The task computes part of the single vector C(:,kfirst). It starts at // pA in Ai,Ax, at pB in Bi,Bx, and (if M is present) at pM in Mi,Mx. It // computes C(:,kfirst), starting at pC in Ci,Cx. // GB_subref also uses the TaskList. It has 12 kinds of fine tasks, // corresponding to each of the 12 methods used in GB_subref_template. For // those fine tasks, method = -TaskList [taskid].klast defines the method to // use. // The GB_subassign functions use the TaskList, in many different ways. typedef struct // task descriptor { int64_t kfirst ; // C(:,kfirst) is the first vector in this task. int64_t klast ; // C(:,klast) is the last vector in this task. int64_t pC ; // fine task starts at Ci, Cx [pC] int64_t pC_end ; // fine task ends at Ci, Cx [pC_end-1] int64_t pM ; // fine task starts at Mi, Mx [pM] int64_t pM_end ; // fine task ends at Mi, Mx [pM_end-1] int64_t pA ; // fine task starts at Ai, Ax [pA] int64_t pA_end ; // fine task ends at Ai, Ax [pA_end-1] int64_t pB ; // fine task starts at Bi, Bx [pB] int64_t pB_end ; // fine task ends at Bi, Bx [pB_end-1] int64_t len ; // fine task handles a subvector of this length } GB_task_struct ; // GB_REALLOC_TASK_LIST: Allocate or reallocate the TaskList so that it can // hold at least ntasks. Double the size if it's too small. #define GB_REALLOC_TASK_LIST(TaskList,ntasks,max_ntasks) \ { \ if ((ntasks) >= max_ntasks) \ { \ bool ok ; \ int nold = (max_ntasks == 0) ? 
0 : (max_ntasks + 1) ; \ int nnew = 2 * (ntasks) + 1 ; \ TaskList = GB_REALLOC (TaskList, nnew, nold, GB_task_struct, &ok) ; \ if (!ok) \ { \ /* out of memory */ \ GB_FREE_ALL ; \ return (GB_OUT_OF_MEMORY) ; \ } \ for (int t = nold ; t < nnew ; t++) \ { \ TaskList [t].kfirst = -1 ; \ TaskList [t].klast = INT64_MIN ; \ TaskList [t].pA = INT64_MIN ; \ TaskList [t].pA_end = INT64_MIN ; \ TaskList [t].pB = INT64_MIN ; \ TaskList [t].pB_end = INT64_MIN ; \ TaskList [t].pC = INT64_MIN ; \ TaskList [t].pC_end = INT64_MIN ; \ TaskList [t].pM = INT64_MIN ; \ TaskList [t].pM_end = INT64_MIN ; \ TaskList [t].len = INT64_MIN ; \ } \ max_ntasks = 2 * (ntasks) ; \ } \ ASSERT ((ntasks) < max_ntasks) ; \ } GrB_Info GB_ewise_slice ( // output: GB_task_struct **p_TaskList, // array of structs, of size max_ntasks int *p_max_ntasks, // size of TaskList int *p_ntasks, // # of tasks constructed int *p_nthreads, // # of threads to use // input: const int64_t Cnvec, // # of vectors of C const int64_t *GB_RESTRICT Ch, // vectors of C, if hypersparse const int64_t *GB_RESTRICT C_to_M, // mapping of C to M const int64_t *GB_RESTRICT C_to_A, // mapping of C to A const int64_t *GB_RESTRICT C_to_B, // mapping of C to B bool Ch_is_Mh, // if true, then Ch == Mh; GB_add only const GrB_Matrix M, // mask matrix to slice (optional) const GrB_Matrix A, // matrix to slice const GrB_Matrix B, // matrix to slice GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_slice_vector ( // output: return i, pA, and pB int64_t *p_i, // work starts at A(i,kA) and B(i,kB) int64_t *p_pM, // M(i:end,kM) starts at pM int64_t *p_pA, // A(i:end,kA) starts at pA int64_t *p_pB, // B(i:end,kB) starts at pB // input: const int64_t pM_start, // M(:,kM) starts at pM_start in Mi,Mx const int64_t pM_end, // M(:,kM) ends at pM_end-1 in Mi,Mx const int64_t *GB_RESTRICT Mi, // indices of M (or NULL) const int64_t pA_start, // A(:,kA) starts at pA_start in Ai,Ax const int64_t pA_end, // A(:,kA) ends at pA_end-1 in Ai,Ax const int64_t *GB_RESTRICT Ai, // indices of A const int64_t A_hfirst, // if Ai is an implicit hyperlist const int64_t pB_start, // B(:,kB) starts at pB_start in Bi,Bx const int64_t pB_end, // B(:,kB) ends at pB_end-1 in Bi,Bx const int64_t *GB_RESTRICT Bi, // indices of B const int64_t vlen, // A->vlen and B->vlen const double target_work // target work ) ; void GB_task_cumsum ( int64_t *Cp, // size Cnvec+1 const int64_t Cnvec, int64_t *Cnvec_nonempty, // # of non-empty vectors in C GB_task_struct *GB_RESTRICT TaskList, // array of structs const int ntasks, // # of tasks const int nthreads // # of threads ) ; //------------------------------------------------------------------------------ // GB_GET_VECTOR: get the content of a vector for a coarse/fine task //------------------------------------------------------------------------------ #define GB_GET_VECTOR(pX_start, pX_fini, pX, pX_end, Xp, kX) \ int64_t pX_start, pX_fini ; \ if (fine_task) \ { \ /* A fine task operates on a slice of X(:,k) */ \ pX_start = TaskList [taskid].pX ; \ pX_fini = TaskList [taskid].pX_end ; \ } \ else \ { \ /* vectors are never sliced for a coarse task */ \ pX_start = Xp [kX] ; \ pX_fini = Xp [kX+1] ; \ } //------------------------------------------------------------------------------ GrB_Info GB_transplant // transplant one matrix into another ( GrB_Matrix C, // output matrix to overwrite with A const GrB_Type ctype, // new type of C GrB_Matrix *Ahandle, // input matrix to copy from and free GB_Context Context ) ; 
GrB_Info GB_transplant_conform // transplant and conform hypersparsity ( GrB_Matrix C, // destination matrix to transplant into GrB_Type ctype, // type to cast into GrB_Matrix *Thandle, // source matrix GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only size_t GB_code_size // return the size of a type, given its code ( const GB_Type_code code, // input code of the type to find the size of const size_t usize // known size of user-defined type ) ; //------------------------------------------------------------------------------ // memory management //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void *GB_calloc_memory // pointer to allocated block of memory ( size_t nitems, // number of items to allocate size_t size_of_item // sizeof each item ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void *GB_malloc_memory // pointer to allocated block of memory ( size_t nitems, // number of items to allocate size_t size_of_item // sizeof each item ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void *GB_realloc_memory // pointer to reallocated block of memory, or // to original block if the realloc failed. ( size_t nitems_new, // new number of items in the object size_t nitems_old, // old number of items in the object size_t size_of_item, // sizeof each item void *p, // old object to reallocate bool *ok // true if successful, false otherwise ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_free_memory ( void *p // pointer to allocated block of memory to free ) ; #define GB_FREE(p) \ { \ GB_free_memory ((void *) p) ; \ (p) = NULL ; \ } #define GB_CALLOC(n,type) (type *) GB_calloc_memory (n, sizeof (type)) #define GB_MALLOC(n,type) (type *) GB_malloc_memory (n, sizeof (type)) #define GB_REALLOC(p,nnew,nold,type,ok) \ p = (type *) GB_realloc_memory (nnew, nold, sizeof (type), (void *) p, ok) //------------------------------------------------------------------------------ // macros to create/free matrices, vectors, and scalars //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Matrix_free // free a matrix ( GrB_Matrix *matrix_handle // handle of matrix to free ) ; #define GB_MATRIX_FREE(A) \ { \ if (GB_Matrix_free (A) == GrB_PANIC) GB_PANIC ; \ } #define GB_VECTOR_FREE(v) GB_MATRIX_FREE ((GrB_Matrix *) v) #define GB_SCALAR_FREE(s) GB_MATRIX_FREE ((GrB_Matrix *) s) //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Type GB_code_type // return the GrB_Type corresponding to the code ( const GB_Type_code code, // type code to convert const GrB_Type type // user type if code is GB_UDT_code ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_slice // slice B into nthreads slices or hyperslices ( GrB_Matrix B, // matrix to slice int nthreads, // # of slices to create int64_t *Slice, // array of size nthreads+1 that defines the slice GrB_Matrix *Bslice, // array of output slices, of size nthreads GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only bool GB_pslice // slice Ap; return true if ok, false if out of memory ( int64_t *GB_RESTRICT *Slice_handle, // size ntasks+1 const int64_t *GB_RESTRICT Ap, // array of size n+1 const int64_t n, const 
int ntasks // # of tasks ) ; void GB_eslice ( // output: int64_t *Slice, // array of size ntasks+1 // input: int64_t e, // number of items to partition amongst the tasks const int ntasks // # of tasks ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_cumsum // cumulative sum of an array ( int64_t *GB_RESTRICT count, // size n+1, input/output const int64_t n, int64_t *GB_RESTRICT kresult, // return k, if needed by the caller int nthreads ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_Descriptor_get // get the contents of a descriptor ( const GrB_Descriptor desc, // descriptor to query, may be NULL bool *C_replace, // if true replace C before C<M>=Z bool *Mask_comp, // if true use logical negation of M bool *Mask_struct, // if true use the structure of M bool *In0_transpose, // if true transpose first input bool *In1_transpose, // if true transpose second input GrB_Desc_Value *AxB_method, // method for C=A*B GB_Context Context ) ; GrB_Info GB_compatible // SUCCESS if all is OK, *_MISMATCH otherwise ( const GrB_Type ctype, // the type of C (matrix or scalar) const GrB_Matrix C, // the output matrix C; NULL if C is a scalar const GrB_Matrix M, // optional mask, NULL if no mask const GrB_BinaryOp accum, // C<M> = accum(C,T) is computed const GrB_Type ttype, // type of T GB_Context Context ) ; GrB_Info GB_Mask_compatible // check type and dimensions of mask ( const GrB_Matrix M, // mask to check const GrB_Matrix C, // C<M>= ... const GrB_Index nrows, // size of output if C is NULL (see GB*assign) const GrB_Index ncols, GB_Context Context ) ; GrB_Info GB_BinaryOp_compatible // check for domain mismatch ( const GrB_BinaryOp op, // binary operator to check const GrB_Type ctype, // C must be compatible with op->ztype const GrB_Type atype, // A must be compatible with op->xtype const GrB_Type btype, // B must be compatible with op->ytype const GB_Type_code bcode, // B may not have a type, just a code GB_Context Context ) ; // Several methods can choose between a qsort-based method that takes // O(anz*log(anz)) time, or a bucket-sort method that takes O(anz+n) time.
// The qsort method is chosen if the following condition is true: #define GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET(anz,n) ((16 * (anz)) < (n)) GB_PUBLIC // accessed by the MATLAB interface only bool GB_Index_multiply // true if ok, false if overflow ( GrB_Index *GB_RESTRICT c, // c = a*b, or zero if overflow occurs const int64_t a, const int64_t b ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only bool GB_size_t_multiply // true if ok, false if overflow ( size_t *c, // c = a*b, or zero if overflow occurs const size_t a, const size_t b ) ; bool GB_extract_vector_list // true if successful, false if out of memory ( // output: int64_t *GB_RESTRICT J, // size nnz(A) or more // input: const GrB_Matrix A, int nthreads ) ; GrB_Info GB_extractTuples // extract all tuples from a matrix ( GrB_Index *I_out, // array for returning row indices of tuples GrB_Index *J_out, // array for returning col indices of tuples void *X, // array for returning values of tuples GrB_Index *p_nvals, // I,J,X size on input; # tuples on output const GB_Type_code xcode, // type of array X const GrB_Matrix A, // matrix to extract tuples from GB_Context Context ) ; GrB_Info GB_Monoid_new // create a monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid const void *identity, // identity value const void *terminal, // terminal value, if any (may be NULL) const GB_Type_code idcode, // identity and terminal type code GB_Context Context ) ; //------------------------------------------------------------------------------ // GB_is_dense: check if a matrix is completely dense //------------------------------------------------------------------------------ static inline bool GB_is_dense ( const GrB_Matrix A ) { // check if A is completely dense: all entries present. // zombies and pending tuples are not considered if (A == NULL) return (false) ; GrB_Index anzmax ; bool ok = GB_Index_multiply (&anzmax, A->vlen, A->vdim) ; return (ok && (anzmax == GB_NNZ (A))) ; } //------------------------------------------------------------------------------ // OpenMP definitions //------------------------------------------------------------------------------ // GB_PART and GB_PARTITION: divide the index range 0:n-1 uniformly // for nthreads. GB_PART(tid,n,nthreads) is the first index for thread tid. #define GB_PART(tid,n,nthreads) \ (((tid) * ((double) (n))) / ((double) (nthreads))) // thread tid will operate on the range k1:(k2-1) #define GB_PARTITION(k1,k2,n,tid,nthreads) \ k1 = ((tid) == 0 ) ? 0 : GB_PART ((tid), n, nthreads) ; \ k2 = ((tid) == (nthreads)-1) ?
(n) : GB_PART ((tid)+1,n, nthreads) #if defined ( _OPENMP ) #include <omp.h> #define GB_OPENMP_MAX_THREADS omp_get_max_threads ( ) #define GB_OPENMP_GET_NUM_THREADS omp_get_num_threads ( ) #define GB_OPENMP_GET_WTIME omp_get_wtime ( ) #else #define GB_OPENMP_MAX_THREADS (1) #define GB_OPENMP_GET_NUM_THREADS (1) #define GB_OPENMP_GET_WTIME (0) #endif // by default, give each thread at least 64K units of work to do #define GB_CHUNK_DEFAULT (64*1024) //------------------------------------------------------------------------------ GrB_Info GB_setElement // set a single entry, C(row,col) = scalar ( GrB_Matrix C, // matrix to modify void *scalar, // scalar to set const GrB_Index row, // row index const GrB_Index col, // column index const GB_Type_code scalar_code, // type of the scalar GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_block // apply all pending computations if blocking mode enabled ( GrB_Matrix A, GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only bool GB_op_is_second // return true if op is SECOND, of the right type ( GrB_BinaryOp op, GrB_Type type ) ; //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only char *GB_code_string // return a static string for a type name ( const GB_Type_code code // code to convert to string ) ; GrB_Info GB_resize // change the size of a matrix ( GrB_Matrix A, // matrix to modify const GrB_Index nrows_new, // new number of rows in matrix const GrB_Index ncols_new, // new number of columns in matrix GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only int64_t GB_nvec_nonempty // return # of non-empty vectors ( const GrB_Matrix A, // input matrix to examine GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_to_nonhyper // convert a matrix to non-hypersparse ( GrB_Matrix A, // matrix to convert to non-hypersparse GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only GrB_Info GB_to_hyper // convert a matrix to hypersparse ( GrB_Matrix A, // matrix to convert to hypersparse GB_Context Context ) ; bool GB_to_nonhyper_test // test for conversion to non-hypersparse ( GrB_Matrix A, // matrix to test int64_t k, // # of non-empty vectors of A, an estimate is OK, // but normally A->nvec_nonempty int64_t vdim // normally A->vdim ) ; bool GB_to_hyper_test // test for conversion to hypersparse ( GrB_Matrix A, // matrix to test int64_t k, // # of non-empty vectors of A, an estimate is OK, // but normally A->nvec_nonempty int64_t vdim // normally A->vdim ) ; GrB_Info GB_to_hyper_conform // conform a matrix to its desired format ( GrB_Matrix A, // matrix to conform GB_Context Context ) ; GrB_Info GB_hyper_prune ( // output, not allocated on input: int64_t *GB_RESTRICT *p_Ap, // size nvec+1 int64_t *GB_RESTRICT *p_Ah, // size nvec int64_t *p_nvec, // # of vectors, all nonempty // input, not modified const int64_t *Ap_old, // size nvec_old+1 const int64_t *Ah_old, // size nvec_old const int64_t nvec_old, // original number of vectors GB_Context Context ) ; GrB_Info GB_hypermatrix_prune ( GrB_Matrix A, // matrix to prune GB_Context Context ) ; GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_cast_array // typecast an array ( GB_void *Cx, // output array const GB_Type_code code1, // type code for Cx GB_void *Ax, // input array const GB_Type_code code2, //
type code for Ax const size_t user_size, // size of Ax and Cx if user-defined const int64_t anz, // number of entries in Cx and Ax const int nthreads // number of threads to use ) ; //------------------------------------------------------------------------------ // boiler plate macros for checking inputs and returning if an error occurs //------------------------------------------------------------------------------ // Functions use these macros to check/get their inputs and return an error // if something has gone wrong. #define GB_OK(method) \ { \ info = method ; \ if (info != GrB_SUCCESS) \ { \ GB_FREE_ALL ; \ return (info) ; \ } \ } // check if a required arg is NULL #define GB_RETURN_IF_NULL(arg) \ if ((arg) == NULL) \ { \ /* the required arg is NULL */ \ return (GB_ERROR (GrB_NULL_POINTER, (GB_LOG, \ "Required argument is null: [%s]", GB_STR(arg)))) ; \ } // arg may be NULL, but if non-NULL then it must be initialized #define GB_RETURN_IF_FAULTY(arg) \ if ((arg) != NULL && (arg)->magic != GB_MAGIC) \ { \ if ((arg)->magic == GB_MAGIC2) \ { \ /* optional arg is not NULL, but invalid */ \ return (GB_ERROR (GrB_INVALID_OBJECT, (GB_LOG, \ "Argument is invalid: [%s]", GB_STR(arg)))) ; \ } \ else \ { \ /* optional arg is not NULL, but not initialized */ \ return (GB_ERROR (GrB_UNINITIALIZED_OBJECT, (GB_LOG, \ "Argument is uninitialized: [%s]", GB_STR(arg)))) ; \ } \ } // arg must not be NULL, and it must be initialized #define GB_RETURN_IF_NULL_OR_FAULTY(arg) \ GB_RETURN_IF_NULL (arg) ; \ GB_RETURN_IF_FAULTY (arg) ; // same as GB_RETURN_IF_NULL(arg), but set Context first #define GB_CONTEXT_RETURN_IF_NULL(arg) \ if ((arg) == NULL) \ { \ /* the required arg is NULL */ \ GB_WHERE (GB_WHERE_STRING) ; \ return (GB_ERROR (GrB_NULL_POINTER, (GB_LOG, \ "Required argument is null: [%s]", GB_STR(arg)))) ; \ } // same as GB_RETURN_IF_FAULTY(arg), but set Context first #define GB_CONTEXT_RETURN_IF_FAULTY(arg) \ if ((arg) != NULL && (arg)->magic != GB_MAGIC) \ { \ GB_WHERE (GB_WHERE_STRING) ; \ if ((arg)->magic == GB_MAGIC2) \ { \ /* optional arg is not NULL, but invalid */ \ return (GB_ERROR (GrB_INVALID_OBJECT, (GB_LOG, \ "Argument is invalid: [%s]", GB_STR(arg)))) ; \ } \ else \ { \ /* optional arg is not NULL, but not initialized */ \ return (GB_ERROR (GrB_UNINITIALIZED_OBJECT, (GB_LOG, \ "Argument is uninitialized: [%s]", GB_STR(arg)))) ; \ } \ } // check the descriptor and extract its contents; also copies // nthreads_max, chunk, and use_mkl from the descriptor to the Context #define GB_GET_DESCRIPTOR(info,desc,dout,dmc,dms,d0,d1,dalgo) \ GrB_Info info ; \ bool dout, dmc, dms, d0, d1 ; \ GrB_Desc_Value dalgo ; \ /* if desc is NULL then defaults are used. This is OK */ \ info = GB_Descriptor_get (desc, &dout, &dmc, &dms, &d0, &d1, &dalgo, \ Context) ; \ if (info != GrB_SUCCESS) \ { \ /* desc not NULL, but uninitialized or an invalid object */ \ return (info) ; \ } // C<M>=Z ignores Z if an empty mask is complemented, so return from // the method without computing anything. But do apply the mask. #define GB_RETURN_IF_QUICK_MASK(C, C_replace, M, Mask_comp) \ if (Mask_comp && M == NULL) \ { \ /* C<!NULL>=NULL since result does not depend on computing Z */ \ return (C_replace ? GB_clear (C, Context) : GrB_SUCCESS) ; \ } // GB_MASK_VERY_SPARSE is true if C<M>=A+B or C<M>=accum(C,T) is being // computed, and the mask M is very sparse compared with A and B. 
#define GB_MASK_VERY_SPARSE(M,A,B) (8 * GB_NNZ (M) < GB_NNZ (A) + GB_NNZ (B))

//------------------------------------------------------------------------------
// Pending updates and zombies
//------------------------------------------------------------------------------

// GB_FLIP is a kind of "negation" about (-1) of a zero-based index.
// If i >= 0 then it is not flipped.
// If i < 0 then it has been flipped.
// Like negation, GB_FLIP is its own inverse: GB_FLIP (GB_FLIP (i)) == i.
// The "nil" value, -1, doesn't change when flipped: GB_FLIP (-1) = -1.
// GB_UNFLIP(i) is like taking an absolute value, undoing any GB_FLIP(i).

// An entry A(i,j) in a matrix can be marked as a "zombie".  A zombie is an
// entry that has been marked for deletion, but hasn't been deleted yet because
// it's more efficient to delete all zombies at once, instead of one at a
// time.  Zombies are created by submatrix assignment, C(I,J)=A, which not only
// copies new entries into C but also deletes entries already present in C.
// If an entry appears in A but not C(I,J), it is a new entry; new entries are
// placed in the pending tuple lists, to be added later.  If an entry appears
// in C(I,J) but NOT in A, then it is marked for deletion by flipping its row
// index, marking it as a zombie.

// Zombies can be restored as regular entries by GrB_*assign.  If an assignment
// C(I,J)=A finds an entry in A that is a zombie in C, the zombie becomes a
// regular entry, taking on the value from A.  The row index is unflipped.

// Zombies are deleted and pending tuples are added into the matrix all at
// once, by GB_Matrix_wait.

GB_PUBLIC   // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_Matrix_wait         // finish all pending computations
(
    GrB_Matrix A,               // matrix with pending computations
    GB_Context Context
) ;

#define GB_FLIP(i)              (-(i)-2)
#define GB_IS_FLIPPED(i)        ((i) < 0)
#define GB_IS_ZOMBIE(i)         ((i) < 0)
#define GB_IS_NOT_FLIPPED(i)    ((i) >= 0)
#define GB_IS_NOT_ZOMBIE(i)     ((i) >= 0)
#define GB_UNFLIP(i)            (((i) < 0) ?
GB_FLIP(i) : (i)) // true if a matrix has pending tuples #define GB_PENDING(A) ((A) != NULL && (A)->Pending != NULL) // true if a matrix is allowed to have pending tuples #define GB_PENDING_OK(A) (GB_PENDING (A) || !GB_PENDING (A)) // true if a matrix has zombies #define GB_ZOMBIES(A) ((A) != NULL && (A)->nzombies > 0) // true if a matrix is allowed to have zombies #define GB_ZOMBIES_OK(A) (((A) == NULL) || ((A) != NULL && (A)->nzombies >= 0)) // true if a matrix has pending tuples or zombies #define GB_PENDING_OR_ZOMBIES(A) (GB_PENDING (A) || GB_ZOMBIES (A)) // do all pending updates: delete zombies and assemble any pending tuples #define GB_MATRIX_WAIT(A) \ { \ if (GB_PENDING_OR_ZOMBIES (A)) \ { \ GB_OK (GB_Matrix_wait ((GrB_Matrix) A, Context)) ; \ ASSERT (!GB_ZOMBIES (A)) ; \ ASSERT (!GB_PENDING (A)) ; \ } \ } #define GB_VECTOR_WAIT(v) GB_MATRIX_WAIT (v) #define GB_SCALAR_WAIT(s) GB_MATRIX_WAIT (s) // do all pending updates: but only if pending tuples; zombies are OK #define GB_MATRIX_WAIT_PENDING(A) \ { \ if (GB_PENDING (A)) \ { \ /* do all pending work: delete zombies and assemble pending tuples */ \ GB_OK (GB_Matrix_wait ((GrB_Matrix) A, Context)) ; \ ASSERT (!GB_ZOMBIES (A)) ; \ ASSERT (!GB_PENDING (A)) ; \ } \ ASSERT (GB_ZOMBIES_OK (A)) ; \ } // true if a matrix has no entries; zombies OK #define GB_EMPTY(A) ((GB_NNZ (A) == 0) && !GB_PENDING (A)) //------------------------------------------------------------------------------ // built-in unary and binary operators //------------------------------------------------------------------------------ #define GB_TYPE bool #define GB_REAL #define GB_BOOLEAN #define GB(x) GB_ ## x ## _BOOL #define GB_BITS 1 #include "GB_ops_template.h" #define GB_TYPE int8_t #define GB_REAL #define GB_SIGNED_INT #define GB(x) GB_ ## x ## _INT8 #define GB_BITS 8 #include "GB_ops_template.h" #define GB_TYPE uint8_t #define GB_REAL #define GB_UNSIGNED_INT #define GB(x) GB_ ## x ## _UINT8 #define GB_BITS 8 #include "GB_ops_template.h" #define GB_TYPE int16_t #define GB_REAL #define GB_SIGNED_INT #define GB(x) GB_ ## x ## _INT16 #define GB_BITS 16 #include "GB_ops_template.h" #define GB_TYPE uint16_t #define GB_REAL #define GB_UNSIGNED_INT #define GB(x) GB_ ## x ## _UINT16 #define GB_BITS 16 #include "GB_ops_template.h" #define GB_TYPE int32_t #define GB_REAL #define GB_SIGNED_INT #define GB(x) GB_ ## x ## _INT32 #define GB_BITS 32 #include "GB_ops_template.h" #define GB_TYPE uint32_t #define GB_REAL #define GB_UNSIGNED_INT #define GB(x) GB_ ## x ## _UINT32 #define GB_BITS 32 #include "GB_ops_template.h" #define GB_TYPE int64_t #define GB_REAL #define GB_SIGNED_INT #define GB(x) GB_ ## x ## _INT64 #define GB_BITS 64 #include "GB_ops_template.h" #define GB_TYPE uint64_t #define GB_REAL #define GB_UNSIGNED_INT #define GB(x) GB_ ## x ## _UINT64 #define GB_BITS 64 #include "GB_ops_template.h" #define GB_TYPE float #define GB_REAL #define GB_FLOATING_POINT #define GB_FLOAT #define GB(x) GB_ ## x ## _FP32 #define GB_BITS 32 #include "GB_ops_template.h" #define GB_TYPE double #define GB_REAL #define GB_FLOATING_POINT #define GB_DOUBLE #define GB(x) GB_ ## x ## _FP64 #define GB_BITS 64 #include "GB_ops_template.h" #define GB_TYPE GxB_FC32_t #define GB_COMPLEX #define GB_FLOATING_POINT #define GB_FLOAT_COMPLEX #define GB(x) GB_ ## x ## _FC32 #define GB_BITS 64 #include "GB_ops_template.h" #define GB_TYPE GxB_FC64_t #define GB_COMPLEX #define GB_FLOATING_POINT #define GB_DOUBLE_COMPLEX #define GB(x) GB_ ## x ## _FC64 #define GB_BITS 128 #include "GB_ops_template.h" 
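//------------------------------------------------------------------------------
// example: the zombie index encoding (illustrative sketch, not part of GB.h)
//------------------------------------------------------------------------------

// A minimal sketch of the GB_FLIP encoding described above, using only the
// macros defined in this header.  GB_FLIP maps i to -i-2, so it is its own
// inverse and -1 is its fixed point:
//
//      int64_t i = 5 ;
//      int64_t z = GB_FLIP (i) ;               // z == -7 : entry is a zombie
//      ASSERT (GB_IS_ZOMBIE (z)) ;             // negative index => zombie
//      ASSERT (GB_UNFLIP (z) == i) ;           // original row index recovered
//      ASSERT (GB_FLIP (GB_FLIP (i)) == i) ;   // GB_FLIP is its own inverse
//      ASSERT (GB_FLIP (-1) == -1) ;           // the "nil" value is unchanged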
#define GB_opaque_GrB_LNOT GB_opaque_GxB_LNOT_BOOL #define GB_opaque_GrB_LOR GB_opaque_GxB_LOR_BOOL #define GB_opaque_GrB_LAND GB_opaque_GxB_LAND_BOOL #define GB_opaque_GrB_LXOR GB_opaque_GxB_LXOR_BOOL #define GB_opaque_GrB_LXNOR GB_opaque_GxB_LXNOR_BOOL //------------------------------------------------------------------------------ // CUDA (DRAFT: in progress) //------------------------------------------------------------------------------ #include "GB_cuda_gateway.h" #endif
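//------------------------------------------------------------------------------
// example: using GB_CHUNK_DEFAULT to pick a thread count (illustrative only)
//------------------------------------------------------------------------------

// A minimal sketch, not part of this header, of how a chunk size is typically
// combined with GB_OPENMP_MAX_THREADS so that small problems do not spawn more
// threads than they have work for:
//
//      int64_t work = anz ;                            // total units of work
//      int64_t wanted = work / GB_CHUNK_DEFAULT + 1 ;  // threads worth using
//      int nmax = GB_OPENMP_MAX_THREADS ;
//      int nthreads = (wanted < nmax) ? (int) wanted : nmax ;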
irbuilder_nested_parallel_for.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // RUN: %clang_cc1 -verify -fopenmp -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -o - | FileCheck --check-prefixes=CHECK %s // RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -debug-info-kind=limited -std=c++11 -verify %s -emit-llvm -o - | FileCheck --check-prefixes=CHECK-DEBUG %s // expected-no-diagnostics // TODO: Teach the update script to check new functions too. #ifndef HEADER #define HEADER // CHECK-LABEL: @_Z14parallel_for_0v( // CHECK-NEXT: entry: // CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK-NEXT: br label [[OMP_PARALLEL:%.*]] // CHECK: omp_parallel: // CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*)) // CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]] // CHECK: omp.par.outlined.exit: // CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] // CHECK: omp.par.exit.split: // CHECK-NEXT: ret void // // CHECK-DEBUG-LABEL: @_Z14parallel_for_0v( // CHECK-DEBUG-NEXT: entry: // CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]), !dbg [[DBG13:![0-9]+]] // CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]] // CHECK-DEBUG: omp_parallel: // CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @_Z14parallel_for_0v..omp_par to void (i32*, i32*, ...)*)), !dbg [[DBG14:![0-9]+]] // CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT:%.*]] // CHECK-DEBUG: omp.par.outlined.exit: // CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] // CHECK-DEBUG: omp.par.exit.split: // CHECK-DEBUG-NEXT: ret void, !dbg [[DBG18:![0-9]+]] // void parallel_for_0(void) { #pragma omp parallel { #pragma omp for for (int i = 0; i < 100; ++i) { } } } // CHECK-LABEL: @_Z14parallel_for_1Pfid( // CHECK-NEXT: entry: // CHECK-NEXT: [[STRUCTARG17:%.*]] = alloca { { i32*, double*, float** }*, i32*, double*, float** }, align 8 // CHECK-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8 // CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8 // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8 // CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8 // CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 // CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8 // CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) // CHECK-NEXT: br label [[OMP_PARALLEL:%.*]] // CHECK: omp_parallel: // CHECK-NEXT: [[GEP_STRUCTARG:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 0 // CHECK-NEXT: store { i32*, double*, float** }* [[STRUCTARG]], { i32*, double*, float** }** [[GEP_STRUCTARG]], align 8 // CHECK-NEXT: [[GEP_A_ADDR18:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 1 // CHECK-NEXT: store i32* 
[[A_ADDR]], i32** [[GEP_A_ADDR18]], align 8 // CHECK-NEXT: [[GEP_B_ADDR19:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 2 // CHECK-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR19]], align 8 // CHECK-NEXT: [[GEP_R_ADDR20:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 3 // CHECK-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR20]], align 8 // CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { { i32*, double*, float** }*, i32*, double*, float** }*)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]]) // CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]] // CHECK: omp.par.outlined.exit16: // CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] // CHECK: omp.par.exit.split: // CHECK-NEXT: ret void // // CHECK-DEBUG-LABEL: @_Z14parallel_for_1Pfid( // CHECK-DEBUG-NEXT: entry: // CHECK-DEBUG-NEXT: [[STRUCTARG17:%.*]] = alloca { { i32*, double*, float** }*, i32*, double*, float** }, align 8 // CHECK-DEBUG-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8 // CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8 // CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8 // CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8 // CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], metadata [[META72:![0-9]+]], metadata !DIExpression()), !dbg [[DBG73:![0-9]+]] // CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 // CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], metadata [[META74:![0-9]+]], metadata !DIExpression()), !dbg [[DBG75:![0-9]+]] // CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8 // CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], metadata [[META76:![0-9]+]], metadata !DIExpression()), !dbg [[DBG77:![0-9]+]] // CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB6:[0-9]+]]), !dbg [[DBG78:![0-9]+]] // CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]] // CHECK-DEBUG: omp_parallel: // CHECK-DEBUG-NEXT: [[GEP_STRUCTARG:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 0 // CHECK-DEBUG-NEXT: store { i32*, double*, float** }* [[STRUCTARG]], { i32*, double*, float** }** [[GEP_STRUCTARG]], align 8 // CHECK-DEBUG-NEXT: [[GEP_A_ADDR18:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 1 // CHECK-DEBUG-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR18]], align 8 // CHECK-DEBUG-NEXT: [[GEP_B_ADDR19:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 2 // CHECK-DEBUG-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR19]], align 8 // CHECK-DEBUG-NEXT: [[GEP_R_ADDR20:%.*]] = getelementptr { { i32*, double*, float** }*, i32*, double*, float** }, { { i32*, 
double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]], i32 0, i32 3 // CHECK-DEBUG-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR20]], align 8 // CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB6]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { { i32*, double*, float** }*, i32*, double*, float** }*)* @_Z14parallel_for_1Pfid..omp_par.4 to void (i32*, i32*, ...)*), { { i32*, double*, float** }*, i32*, double*, float** }* [[STRUCTARG17]]), !dbg [[DBG79:![0-9]+]] // CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT16:%.*]] // CHECK-DEBUG: omp.par.outlined.exit16: // CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] // CHECK-DEBUG: omp.par.exit.split: // CHECK-DEBUG-NEXT: ret void, !dbg [[DBG81:![0-9]+]] // void parallel_for_1(float *r, int a, double b) { #pragma omp parallel { #pragma omp parallel { #pragma omp for for (int i = 0; i < 100; ++i) { *r = a + b; } } } } // CHECK-LABEL: @_Z14parallel_for_2Pfid( // CHECK-NEXT: entry: // CHECK-NEXT: [[STRUCTARG218:%.*]] = alloca { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, align 8 // CHECK-NEXT: [[STRUCTARG214:%.*]] = alloca { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, align 8 // CHECK-NEXT: [[STRUCTARG209:%.*]] = alloca { i32*, double*, float** }, align 8 // CHECK-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8 // CHECK-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8 // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[B_ADDR:%.*]] = alloca double, align 8 // CHECK-NEXT: [[I185:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8 // CHECK-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4 // CHECK-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4 // CHECK-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8 // CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 // CHECK-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8 // CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) // CHECK-NEXT: br label [[OMP_PARALLEL:%.*]] // CHECK: omp_parallel: // CHECK-NEXT: [[GEP_STRUCTARG214:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 0 // CHECK-NEXT: store { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG214]], { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }** [[GEP_STRUCTARG214]], align 8 // CHECK-NEXT: [[GEP_STRUCTARG219:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** 
}*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 1 // CHECK-NEXT: store { i32*, double*, float** }* [[STRUCTARG]], { i32*, double*, float** }** [[GEP_STRUCTARG219]], align 8 // CHECK-NEXT: [[GEP_A_ADDR:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 2 // CHECK-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR]], align 8 // CHECK-NEXT: [[GEP_B_ADDR:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 3 // CHECK-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR]], align 8 // CHECK-NEXT: [[GEP_R_ADDR:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 4 // CHECK-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR]], align 8 // CHECK-NEXT: [[GEP_STRUCTARG209220:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 5 // CHECK-NEXT: store { i32*, double*, float** }* [[STRUCTARG209]], { i32*, double*, float** }** [[GEP_STRUCTARG209220]], align 8 // CHECK-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]]) // CHECK-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]] // CHECK: omp.par.outlined.exit184: // CHECK-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] // CHECK: omp.par.exit.split: // CHECK-NEXT: store i32 0, i32* [[I185]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0 // CHECK-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0 // CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4 // CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]]) // CHECK-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4 // CHECK-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]] // CHECK: omp_loop.preheader190: // CHECK-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4 // CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1 // CHECK-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4 // CHECK-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4 // CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 1) // CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4 // CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4 // CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]] // CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1 // CHECK-NEXT: br label [[OMP_LOOP_HEADER191:%.*]] // CHECK: omp_loop.header191: // CHECK-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ] // CHECK-NEXT: br label [[OMP_LOOP_COND192:%.*]] // CHECK: omp_loop.cond192: // CHECK-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]] // CHECK-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], label [[OMP_LOOP_EXIT195:%.*]] // CHECK: omp_loop.body193: // CHECK-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]] // CHECK-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]]) // CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double // CHECK-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8 // CHECK-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]] // CHECK-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float // CHECK-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8 // CHECK-NEXT: store float [[CONV202]], float* [[TMP11]], align 4 // CHECK-NEXT: br label [[OMP_LOOP_INC194]] // CHECK: omp_loop.inc194: // 
CHECK-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1 // CHECK-NEXT: br label [[OMP_LOOP_HEADER191]] // CHECK: omp_loop.exit195: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM207]]) // CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM208]]) // CHECK-NEXT: br label [[OMP_LOOP_AFTER196:%.*]] // CHECK: omp_loop.after196: // CHECK-NEXT: ret void // // CHECK-DEBUG-LABEL: @_Z14parallel_for_2Pfid( // CHECK-DEBUG-NEXT: entry: // CHECK-DEBUG-NEXT: [[STRUCTARG218:%.*]] = alloca { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, align 8 // CHECK-DEBUG-NEXT: [[STRUCTARG214:%.*]] = alloca { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, align 8 // CHECK-DEBUG-NEXT: [[STRUCTARG209:%.*]] = alloca { i32*, double*, float** }, align 8 // CHECK-DEBUG-NEXT: [[STRUCTARG:%.*]] = alloca { i32*, double*, float** }, align 8 // CHECK-DEBUG-NEXT: [[R_ADDR:%.*]] = alloca float*, align 8 // CHECK-DEBUG-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: [[B_ADDR:%.*]] = alloca double, align 8 // CHECK-DEBUG-NEXT: [[I185:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: [[AGG_CAPTURED186:%.*]] = alloca [[STRUCT_ANON_17:%.*]], align 8 // CHECK-DEBUG-NEXT: [[AGG_CAPTURED187:%.*]] = alloca [[STRUCT_ANON_18:%.*]], align 4 // CHECK-DEBUG-NEXT: [[DOTCOUNT_ADDR188:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: [[P_LASTITER203:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: [[P_LOWERBOUND204:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: [[P_UPPERBOUND205:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: [[P_STRIDE206:%.*]] = alloca i32, align 4 // CHECK-DEBUG-NEXT: store float* [[R:%.*]], float** [[R_ADDR]], align 8 // CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata float** [[R_ADDR]], metadata [[META133:![0-9]+]], metadata !DIExpression()), !dbg [[DBG134:![0-9]+]] // CHECK-DEBUG-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 // CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[A_ADDR]], metadata [[META135:![0-9]+]], metadata !DIExpression()), !dbg [[DBG136:![0-9]+]] // CHECK-DEBUG-NEXT: store double [[B:%.*]], double* [[B_ADDR]], align 8 // CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata double* [[B_ADDR]], metadata [[META137:![0-9]+]], metadata !DIExpression()), !dbg [[DBG138:![0-9]+]] // CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB13:[0-9]+]]), !dbg [[DBG139:![0-9]+]] // CHECK-DEBUG-NEXT: br label [[OMP_PARALLEL:%.*]] // CHECK-DEBUG: omp_parallel: // CHECK-DEBUG-NEXT: [[GEP_STRUCTARG214:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 0 // CHECK-DEBUG-NEXT: store { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG214]], { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }** [[GEP_STRUCTARG214]], align 8 
// CHECK-DEBUG-NEXT: [[GEP_STRUCTARG219:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 1 // CHECK-DEBUG-NEXT: store { i32*, double*, float** }* [[STRUCTARG]], { i32*, double*, float** }** [[GEP_STRUCTARG219]], align 8 // CHECK-DEBUG-NEXT: [[GEP_A_ADDR:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 2 // CHECK-DEBUG-NEXT: store i32* [[A_ADDR]], i32** [[GEP_A_ADDR]], align 8 // CHECK-DEBUG-NEXT: [[GEP_B_ADDR:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 3 // CHECK-DEBUG-NEXT: store double* [[B_ADDR]], double** [[GEP_B_ADDR]], align 8 // CHECK-DEBUG-NEXT: [[GEP_R_ADDR:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 4 // CHECK-DEBUG-NEXT: store float** [[R_ADDR]], float*** [[GEP_R_ADDR]], align 8 // CHECK-DEBUG-NEXT: [[GEP_STRUCTARG209220:%.*]] = getelementptr { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]], i32 0, i32 5 // CHECK-DEBUG-NEXT: store { i32*, double*, float** }* [[STRUCTARG209]], { i32*, double*, float** }** [[GEP_STRUCTARG209220]], align 8 // CHECK-DEBUG-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB13]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*)* @_Z14parallel_for_2Pfid..omp_par.23 to void (i32*, i32*, ...)*), { { { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }*, { i32*, double*, float** }*, i32*, double*, float**, { i32*, double*, float** }* }* [[STRUCTARG218]]), !dbg [[DBG140:![0-9]+]] // CHECK-DEBUG-NEXT: br label [[OMP_PAR_OUTLINED_EXIT184:%.*]] // CHECK-DEBUG: omp.par.outlined.exit184: // CHECK-DEBUG-NEXT: br label [[OMP_PAR_EXIT_SPLIT:%.*]] // CHECK-DEBUG: omp.par.exit.split: // CHECK-DEBUG-NEXT: call void @llvm.dbg.declare(metadata i32* [[I185]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG147:![0-9]+]] // CHECK-DEBUG-NEXT: store i32 0, i32* [[I185]], align 4, !dbg [[DBG147]] // CHECK-DEBUG-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON_17]], %struct.anon.17* [[AGG_CAPTURED186]], i32 0, i32 0, !dbg [[DBG148:![0-9]+]] // CHECK-DEBUG-NEXT: store i32* [[I185]], i32** [[TMP0]], align 8, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_18]], %struct.anon.18* [[AGG_CAPTURED187]], i32 0, i32 0, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP2:%.*]] = load i32, i32* [[I185]], align 4, !dbg [[DBG149:![0-9]+]] // CHECK-DEBUG-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: call void @__captured_stmt.19(i32* [[DOTCOUNT_ADDR188]], %struct.anon.17* [[AGG_CAPTURED186]]), !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[DOTCOUNT189:%.*]] = load i32, i32* [[DOTCOUNT_ADDR188]], align 4, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: br label [[OMP_LOOP_PREHEADER190:%.*]], !dbg [[DBG148]] // CHECK-DEBUG: omp_loop.preheader190: // CHECK-DEBUG-NEXT: store i32 0, i32* [[P_LOWERBOUND204]], align 4, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT189]], 1, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND205]], align 4, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: store i32 1, i32* [[P_STRIDE206]], align 4, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM207:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB42:[0-9]+]]), !dbg [[DBG148]] // CHECK-DEBUG-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]], i32 34, i32* [[P_LASTITER203]], i32* [[P_LOWERBOUND204]], i32* [[P_UPPERBOUND205]], i32* [[P_STRIDE206]], i32 1, i32 1), !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND204]], align 4, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND205]], align 4, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]], !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191:%.*]], !dbg [[DBG148]] // CHECK-DEBUG: omp_loop.header191: // CHECK-DEBUG-NEXT: [[OMP_LOOP_IV197:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER190]] ], [ [[OMP_LOOP_NEXT199:%.*]], [[OMP_LOOP_INC194:%.*]] ], !dbg [[DBG148]] // CHECK-DEBUG-NEXT: br label [[OMP_LOOP_COND192:%.*]], !dbg [[DBG148]] // CHECK-DEBUG: omp_loop.cond192: // CHECK-DEBUG-NEXT: [[OMP_LOOP_CMP198:%.*]] = icmp ult i32 [[OMP_LOOP_IV197]], [[TMP7]], !dbg [[DBG148]] // CHECK-DEBUG-NEXT: br i1 [[OMP_LOOP_CMP198]], label [[OMP_LOOP_BODY193:%.*]], 
label [[OMP_LOOP_EXIT195:%.*]], !dbg [[DBG148]] // CHECK-DEBUG: omp_loop.body193: // CHECK-DEBUG-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV197]], [[TMP4]], !dbg [[DBG148]] // CHECK-DEBUG-NEXT: call void @__captured_stmt.20(i32* [[I185]], i32 [[TMP8]], %struct.anon.18* [[AGG_CAPTURED187]]), !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !dbg [[DBG150:![0-9]+]] // CHECK-DEBUG-NEXT: [[CONV200:%.*]] = sitofp i32 [[TMP9]] to double, !dbg [[DBG150]] // CHECK-DEBUG-NEXT: [[TMP10:%.*]] = load double, double* [[B_ADDR]], align 8, !dbg [[DBG151:![0-9]+]] // CHECK-DEBUG-NEXT: [[ADD201:%.*]] = fadd double [[CONV200]], [[TMP10]], !dbg [[DBG152:![0-9]+]] // CHECK-DEBUG-NEXT: [[CONV202:%.*]] = fptrunc double [[ADD201]] to float, !dbg [[DBG150]] // CHECK-DEBUG-NEXT: [[TMP11:%.*]] = load float*, float** [[R_ADDR]], align 8, !dbg [[DBG153:![0-9]+]] // CHECK-DEBUG-NEXT: store float [[CONV202]], float* [[TMP11]], align 4, !dbg [[DBG154:![0-9]+]] // CHECK-DEBUG-NEXT: br label [[OMP_LOOP_INC194]], !dbg [[DBG148]] // CHECK-DEBUG: omp_loop.inc194: // CHECK-DEBUG-NEXT: [[OMP_LOOP_NEXT199]] = add nuw i32 [[OMP_LOOP_IV197]], 1, !dbg [[DBG148]] // CHECK-DEBUG-NEXT: br label [[OMP_LOOP_HEADER191]], !dbg [[DBG148]] // CHECK-DEBUG: omp_loop.exit195: // CHECK-DEBUG-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB42]], i32 [[OMP_GLOBAL_THREAD_NUM207]]), !dbg [[DBG148]] // CHECK-DEBUG-NEXT: [[OMP_GLOBAL_THREAD_NUM208:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB42]]), !dbg [[DBG151]] // CHECK-DEBUG-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB43:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM208]]), !dbg [[DBG151]] // CHECK-DEBUG-NEXT: br label [[OMP_LOOP_AFTER196:%.*]], !dbg [[DBG148]] // CHECK-DEBUG: omp_loop.after196: // CHECK-DEBUG-NEXT: ret void, !dbg [[DBG155:![0-9]+]] // void parallel_for_2(float *r, int a, double b) { #pragma omp parallel { #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; #pragma omp parallel { #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; #pragma omp parallel { #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; } #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; #pragma omp parallel { #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; } #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; } #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; } #pragma omp for for (int i = 0; i < 100; ++i) *r = a + b; } #endif
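// Note (illustrative, not checked by the FileCheck lines above): unless the
// runtime enables nested parallelism, the inner "#pragma omp parallel"
// regions in parallel_for_1 and parallel_for_2 execute with teams of a
// single thread.  A host program could enable two active levels with:
//
//     omp_set_max_active_levels(2);   // or: export OMP_MAX_ACTIVE_LEVELS=2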
main.c
#include "main.h" #include <math.h> #include <omp.h> #include <stdlib.h> #include <time.h> #include "../RT/Cameras/camera.h" #include "../RT/Vectors/vector3.h" #include "../RT/Worlds/world.h" void handleInput(World *world) { int x = 0, y = 0; size_t clicked = SDL_GetRelativeMouseState(&x, &y); // maybe multiply by the dot product with the plane perpendicular to (0,1,0). float mouseSpeed = sqrtf((float)(x*x*0.4f + y*y)); #pragma warning(push) #pragma warning(disable:4800) unsigned char *keys = SDL_GetKeyState(NULL); char leftClick = clicked & SDL_BUTTON(SDL_BUTTON_LEFT); char rightClick = clicked & SDL_BUTTON(SDL_BUTTON_RIGHT); char forward = keys[SDLK_w] | keys[SDLK_UP]; char backward = keys[SDLK_s] | keys[SDLK_DOWN]; char left = keys[SDLK_a] | keys[SDLK_LEFT]; char right = keys[SDLK_d] | keys[SDLK_RIGHT]; char up = keys[SDLK_SPACE]; char down = keys[SDLK_LSHIFT]; char turnRight = (x > 0) && (rightClick || leftClick); char turnLeft = (x < 0) && (rightClick || leftClick); char turnUp = (y < 0) && (rightClick || leftClick); char turnDown = (y > 0) && (rightClick || leftClick); char any = forward | backward | left | right | up | down | turnRight | turnLeft | turnUp | turnDown; #pragma warning(pop) if (!any) return; struct Camera camera = *world->camera; Vector3 globalUp = { 0.0f, 1.0f, 0.0f }; Vector3 newEye = camera.eye; Vector3 newFwd = camera.forward; Vector3 newRgt = camera.right; Vector3 newUp = camera.up; float velocity = 0.15f; newEye.x += (newFwd.x*velocity*forward) - (newFwd.x*velocity*backward) + (newRgt.x*velocity*right) - (newRgt.x*velocity*left) + (newUp.x*velocity*up) - (newUp.x*velocity*down); newEye.y += (newFwd.y*velocity*forward) - (newFwd.y*velocity*backward) + (newRgt.y*velocity*right) - (newRgt.y*velocity*left) + (newUp.y*velocity*up) - (newUp.y*velocity*down); newEye.z += (newFwd.z*velocity*forward) - (newFwd.z*velocity*backward) + (newRgt.z*velocity*right) - (newRgt.z*velocity*left) + (newUp.z*velocity*up) - (newUp.z*velocity*down); newFwd.x += (newRgt.x*(velocity*0.05f*mouseSpeed)*turnRight) - (newRgt.x*(velocity*0.05f*mouseSpeed)*turnLeft) + (newUp.x*(velocity*0.05f*mouseSpeed)*turnUp) - (newUp.x*(velocity*0.05f*mouseSpeed)*turnDown); newFwd.y += (newRgt.y*(velocity*0.05f*mouseSpeed)*turnRight) - (newRgt.y*(velocity*0.05f*mouseSpeed)*turnLeft) + (newUp.y*(velocity*0.05f*mouseSpeed)*turnUp) - (newUp.y*(velocity*0.05f*mouseSpeed)*turnDown); newFwd.z += (newRgt.z*(velocity*0.05f*mouseSpeed)*turnRight) - (newRgt.z*(velocity*0.05f*mouseSpeed)*turnLeft) + (newUp.z*(velocity*0.05f*mouseSpeed)*turnUp) - (newUp.z*(velocity*0.05f*mouseSpeed)*turnDown); float newFwdLengthRecip = 1.0f / sqrtf( (newFwd.x*newFwd.x) + (newFwd.y*newFwd.y) + (newFwd.z*newFwd.z)); newFwd.x *= newFwdLengthRecip; newFwd.y *= newFwdLengthRecip; newFwd.z *= newFwdLengthRecip; newRgt.x = (newFwd.y * globalUp.z) - (globalUp.y * newFwd.z); newRgt.y = (globalUp.x * newFwd.z) - (newFwd.x * globalUp.z); newRgt.z = (newFwd.x * globalUp.y) - (globalUp.x * newFwd.y); float newRgtLengthRecip = 1.0f / sqrtf( (newRgt.x*newRgt.x) + (newRgt.y*newRgt.y) + (newRgt.z*newRgt.z)); newRgt.x *= newRgtLengthRecip * camera.aspect; newRgt.y *= newRgtLengthRecip * camera.aspect; newRgt.z *= newRgtLengthRecip * camera.aspect; newUp.x = (newRgt.y * newFwd.z) - (newFwd.y * newRgt.z); newUp.y = (newFwd.x * newRgt.z) - (newRgt.x * newFwd.z); newUp.z = (newRgt.x * newFwd.y) - (newFwd.x * newRgt.y); float newUpLengthRecip = 1.0f / sqrtf( (newUp.x*newUp.x) + (newUp.y*newUp.y) + (newUp.z*newUp.z)); newUp.x *= newUpLengthRecip; newUp.y *= 
newUpLengthRecip; newUp.z *= newUpLengthRecip; if (any) { done_rendering = FALSE; camera.eye = newEye; camera.forward = newFwd; camera.right = newRgt; camera.up = newUp; *world->camera = camera; } } void setPixel(SDL_Surface *dst, unsigned int x, unsigned int y, unsigned int pixel) { int bpp = dst->format->BytesPerPixel; unsigned char *p = (unsigned char*)dst->pixels + y * dst->pitch + x * bpp; switch (bpp) { case 1: *p = pixel; break; case 2: *(unsigned short*)p = pixel; break; case 3: if (SDL_BYTEORDER == SDL_BIG_ENDIAN) { p[0] = (pixel >> 16) & 0xFF; p[1] = (pixel >> 8) & 0xFF; p[2] = pixel & 0xFF; } else { p[0] = pixel & 0xFF; p[1] = (pixel >> 8) & 0xFF; p[2] = (pixel >> 16) & 0xFF; } break; case 4: *(unsigned int*)p = pixel; break; } } void render(SDL_Surface *dst, World *world) { float screenWidthRecip = 1.0f / SCREEN_WIDTH; float screenHeightRecip = 1.0f / SCREEN_HEIGHT; float superSamplesRecip = 1.0f / SUPER_SAMPLES; float superSamplesSquaredRecip = 1.0f / (SUPER_SAMPLES * SUPER_SAMPLES); short x, y, k, l; #pragma omp parallel for private(y, k, l) for (x = 0; x < SCREEN_WIDTH; x += MAX_PIXEL_SIZE) { for (y = 0; y < SCREEN_HEIGHT; y += MAX_PIXEL_SIZE) { Vector3 color; unsigned int colorR = 0, colorG = 0, colorB = 0; float left = (float)(x - (MAX_PIXEL_SIZE * 0.5f)); float right = (float)(left + MAX_PIXEL_SIZE); float top = (float)(y - (MAX_PIXEL_SIZE * 0.5f)); float bottom = (float)(top + MAX_PIXEL_SIZE); float radius = (float)(((right - left - top + bottom) * 0.25f) * superSamplesRecip); for (float i = (float)(left + radius); i < right; i += 2.0f * radius) { for (float j = (float)(top + radius); j < bottom; j += 2.0f * radius) { color = colorAt(world, i * screenWidthRecip, j * screenHeightRecip); colorR += (unsigned int)(color.x * 255); colorG += (unsigned int)(color.y * 255); colorB += (unsigned int)(color.z * 255); } } colorR = (unsigned int)(colorR * superSamplesSquaredRecip); colorG = (unsigned int)(colorG * superSamplesSquaredRecip); colorB = (unsigned int)(colorB * superSamplesSquaredRecip); for (k = 0; k < MAX_PIXEL_SIZE; k++) for (l = 0; l < MAX_PIXEL_SIZE; l++) setPixel(dst, x + k < SCREEN_WIDTH ? x + k : x, y + l < SCREEN_HEIGHT ? y + l : y, (colorR << 16) | (colorG << 8) | colorB); } } done_rendering = TRUE; } void tick(World *world) { } #pragma warning(disable:4100) int main(int argc, char *argv[]) { SDL_Surface *screen; SDL_Event sdl_event; unsigned char running = FALSE; SDL_Init(SDL_INIT_VIDEO); SDL_putenv("SDL_VIDEO_CENTERED=center"); SDL_WM_SetCaption(SCREEN_TITLE, NULL); screen = SDL_SetVideoMode(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_BPP, SCREEN_FLAGS); srand((unsigned int)time(NULL)); printf("C Ray Tracing! 
Now with void pointers!\n\n"); printf("Screen resolution: [%d, %d]\n", SCREEN_WIDTH, SCREEN_HEIGHT); printf("Render resolution: [%d, %d]\n", RENDER_WIDTH * SUPER_SAMPLES, RENDER_HEIGHT * SUPER_SAMPLES); printf("Pixelsize: %d\n", MAX_PIXEL_SIZE); printf("Supersamples: %d\n", SUPER_SAMPLES); printf("Light samples: %d\n", LIGHT_SAMPLES); printf("Shape count: %d sphere(s), %d plane(s), %d cuboid(s)\n", SPHERE_COUNT, PLANE_COUNT, CUBOID_COUNT); printf("Light count: %d point light(s), %d distant light(s), %d sphere light(s)\n", POINT_LIGHT_COUNT, DIST_LIGHT_COUNT, SPHERE_LIGHT_COUNT); World world = makeWorld(RENDER_ASPECT); running = TRUE; done_rendering = FALSE; while (running) { size_t startTime = SDL_GetTicks(); while (SDL_PollEvent(&sdl_event)) { if ((sdl_event.type == SDL_QUIT) || (sdl_event.type == SDL_KEYDOWN && sdl_event.key.keysym.sym == SDLK_ESCAPE)) running = FALSE; } handleInput(&world); tick(&world); if (!done_rendering) { render(screen, &world); } SDL_Flip(screen); if ((SDL_GetTicks() - startTime) < SCREEN_DELAY) SDL_Delay(SCREEN_DELAY - (SDL_GetTicks() - startTime)); } destructWorld(&world); SDL_Quit(); exit(EXIT_SUCCESS); }
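/*
 * Illustrative helpers (not part of the project API): the camera-basis update
 * in handleInput() above inlines a cross product and a normalization three
 * times each.  The same math, factored out, looks like this; Vector3 and
 * sqrtf are the ones already used in this file.
 */
static Vector3 v3_cross(Vector3 a, Vector3 b)
{
    Vector3 r;
    r.x = (a.y * b.z) - (a.z * b.y);
    r.y = (a.z * b.x) - (a.x * b.z);
    r.z = (a.x * b.y) - (a.y * b.x);
    return r;
}

static Vector3 v3_normalize(Vector3 v)
{
    float recip = 1.0f / sqrtf((v.x * v.x) + (v.y * v.y) + (v.z * v.z));
    v.x *= recip; v.y *= recip; v.z *= recip;
    return v;
}

/* usage sketch: right = normalize(cross(forward, globalUp)),
 *               up    = cross(right, forward)                */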
criticalOne.c
#include <stdlib.h>   /* malloc, free */

int main() {
    int s1 = 0, s2 = 0;   /* initialized: C1 reads s1 before any write otherwise */
    int arr[10];
    int *arr2 = (int *) malloc(15 * sizeof(int));
    arr[0] = 1;
    #pragma omp parallel
    {
        int i = 0;
        #pragma omp for
        for (i = 0; i < 10; i++) {
            // C1 -- s1, s2
            #pragma omp critical
            {
                s1 = s1 + 2;
                s2 = s1;
            }
            // C2 -- s2, arr
            #pragma omp critical
            {
                s2 = 5;
                arr[i] = 16;
            }
            // C3 -- s1, s2, arr
            #pragma omp critical
            {
                s1 = 1;
                s1 = s1 + 2;
                s2 = s1;
                arr[i] = 10;
            }
        }
    }
    free(arr2);   /* release the heap block; it was previously leaked */
    return 0;
}
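/*
 * Illustrative note (not part of the original): an OpenMP "critical" without
 * a name uses one global lock, so C1, C2 and C3 above also exclude each
 * other.  If two sections touched disjoint data, naming them would let them
 * run concurrently:
 *
 *     #pragma omp critical(update_scalars)   // guards s1/s2 only
 *     { s1 = s1 + 2; s2 = s1; }
 *
 *     #pragma omp critical(update_array)     // guards arr only
 *     { arr[i] = 16; }
 *
 * Here all three sections share s1, s2 and arr, so the single unnamed lock is
 * the correct choice.
 */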
cpu_bound.c
/* * Copyright (c) 2009, 2010, 2011, ETH Zurich. * All rights reserved. * * This file is distributed under the terms in the attached LICENSE file. * If you do not find this file, copies can be found by writing to: * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group. */ #include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <omp.h> #include <assert.h> #define PERIOD 2500000000UL #define ITERATIONS 10 #define STACK_SIZE (64 * 1024) struct workcnt { uint64_t cnt; } __attribute__ ((aligned (64))); static inline uint64_t rdtsc(void) { uint64_t eax, edx; __asm volatile ("rdtsc" : "=a" (eax), "=d" (edx)); return (edx << 32) | eax; } int main(int argc, char *argv[]) { static struct workcnt workcnt[32]; int nthreads; if(argc == 2) { nthreads = atoi(argv[1]); backend_span_domain(nthreads, STACK_SIZE); bomp_custom_init(); omp_set_num_threads(nthreads); } else { assert(!"Specify number of threads"); } uint64_t glast = rdtsc(); for(;;) { // Do some work #pragma omp parallel { uint64_t last = glast; for(;;) { workcnt[omp_get_thread_num()].cnt++; if(rdtsc() >= last + PERIOD) { break; } } } printf("%s: %lu: threads %d (%s), progress ", argv[0], rdtsc(), omp_get_num_threads(), omp_get_dynamic() ? "dynamic" : "static"); for(int n = 0; n < 32; n++) { printf("%lu ", workcnt[n].cnt); } printf("\n"); fflush(stdout); glast += PERIOD; } }
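/*
 * Portability note (illustrative, not in the original build): rdtsc() above
 * is x86-specific.  An equivalent per-thread busy loop can be driven by
 * OpenMP's portable wall clock instead:
 *
 *     double end = omp_get_wtime() + 1.0;          // run for one second
 *     #pragma omp parallel
 *     {
 *         while (omp_get_wtime() < end)
 *             workcnt[omp_get_thread_num()].cnt++;
 *     }
 */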
Texture.h
#pragma once

#include <algorithm>
#include <cmath> // floor
#include <new>   // placement new, used by operator= below

template<typename T>
class Texture1D {
private:
	T * const data;
	const float size;

public:
	Texture1D() : data(NULL), size(0) {}

	Texture1D(const T *_data, const int _size) : data(new T[_size]), size(_size) {
		// #pragma omp parallel for
		for (int i = 0; i < _size; i++)
			data[i] = _data[i];
	}

	// deep copy; the implicit copy would share 'data' and double-delete it
	Texture1D(const Texture1D &src) : data(new T[(int)src.size]), size(src.size) {
		for (int i = 0; i < (int)size; i++)
			data[i] = src.data[i];
	}

	~Texture1D() { if (data) delete[] data; }

	T operator()(float s) const {
		using std::max;
		using std::min;
		s = max(0.0f, min(1.0f, s));
		const float x  = s*(size-1);
		const float x1 = floor(x);
		const float x2 = min(x1+1.0f, size-1);
		const T c1 = data[static_cast<int>(x1)];
		const T c2 = data[static_cast<int>(x2)];
		// linear interpolation with unit texel spacing; also correct at the
		// right edge where x1 == x2 (the weight is then zero)
		return c1 + (c2 - c1)*(x - x1);
	}
};

template<typename T>
class Texture2D {
private:
	T * const data;
	const float width, height;

public:
	Texture2D() : data(NULL), width(0), height(0) {}

	Texture2D(const T *_data, const int _width, const int _height)
		: data(new T[_width*_height]), width(_width), height(_height) {
		const int n = _width*_height;
		// #pragma omp parallel for
		for (int i = 0; i < n; i++) {
			data[i] = _data[i];
		}
	}

	// deep copy, for the same reason as Texture1D
	Texture2D(const Texture2D &src)
		: data(new T[(int)(src.width*src.height)]), width(src.width), height(src.height) {
		const int n = (int)(width*height);
		for (int i = 0; i < n; i++)
			data[i] = src.data[i];
	}

	~Texture2D() { if (data) delete[] data; }

	Texture2D& operator=(const Texture2D &src) {
		// the members are const, so rebuild the object in place; the previous
		// version assigned to *this and recursed into itself
		if (this != &src) {
			this->~Texture2D();
			new (this) Texture2D(src.data, (int)src.width, (int)src.height);
		}
		return *this;
	}

	T operator()(float s, float t) const {
		using std::max;
		using std::min;
		s = max(0.0f, min(1.0f, s));
		t = max(0.0f, min(1.0f, t));
		const float x = s*(width-2);
		const float y = t*(height-2);
		const float x1 = floor(x);
		const float y1 = floor(y);
		const float x2 = min(x1+1.0f, width-1);
		const float y2 = min(y1+1.0f, height-1);
		const T c11 = data[static_cast<int>(y1*width + x1)];
		const T c12 = data[static_cast<int>(y2*width + x1)];
		const T c21 = data[static_cast<int>(y1*width + x2)];
		const T c22 = data[static_cast<int>(y2*width + x2)];
		// standard bilinear blend of the four surrounding texels
		return c11*(x2-x)*(y2-y)
		     + c21*(x-x1)*(y2-y)
		     + c12*(x2-x)*(y-y1)
		     + c22*(x-x1)*(y-y1);
	}
};
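// Usage sketch (illustrative): with the corrected 1-D interpolation above,
// sampling a quarter of the way into a 3-texel ramp lands halfway between
// texels 0 and 1:
//
//     const float texels[3] = { 0.0f, 10.0f, 20.0f };
//     Texture1D<float> tex(texels, 3);
//     float v = tex(0.25f);   // x = 0.25 * (3-1) = 0.5  ->  v == 5.0f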
oracle_fmt_plug.c
/* * This software is Copyright (c) 2004 bartavelle, <simon at banquise.net>, and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, are permitted. * * UTF-8 support: Copyright magnum 2012 and hereby released to the general * public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, is permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_oracle; #elif FMT_REGISTERS_H john_register_one(&fmt_oracle); #else #include <string.h> #include <openssl/des.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "unicode.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 512 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "oracle" #define FORMAT_NAME "Oracle 10" #define FORMAT_TAG "O$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 120 // worst case UTF-8 is 40 characters of Unicode, that'll do #define BINARY_SIZE 8 #define BINARY_ALIGN 4 #define MAX_USERNAME_LEN 30 #define SALT_SIZE (MAX_USERNAME_LEN*2 + 4) // also contain the NULL #define SALT_ALIGN 2 #define CIPHERTEXT_LENGTH 16 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 //#define DEBUG_ORACLE static struct fmt_tests tests[] = { {"O$SYSTEM#9EEDFA0AD26C6D52", "THALES" }, {"O$SIMON#4F8BC1809CB2AF77", "A"}, {"O$SIMON#183D72325548EF11", "THALES2" }, {"O$SIMON#C4EB3152E17F24A4", "TST" }, {"O$BOB#b02c8e79ed2e7f46", "LAPIN" }, {"O$BOB#6bb4e95898c88011", "LAPINE" }, {"O$BOB#cdc6b483874b875b", "GLOUGLOU" }, {"O$BOB#ef1f9139db2d5279", "GLOUGLOUTER" }, {"O$BOB#c0ee5107c9a080c1", "AZERTYUIOP" }, {"O$BOB#99e8b231d33772f9", "CANARDWC" }, {"O$BOB#da3224126a67c8ed", "COUCOU_COUCOU" }, {"O$bob#ec8147abb3373d53", "LONG_MOT_DE_PASSE_OUI" }, {"9EEDFA0AD26C6D52", "THALES", {"SYSTEM"} }, {"4F8BC1809CB2AF77", "A", {"SIMON"} }, {"183D72325548EF11", "THALES2", {"SIMON"} }, {"C4EB3152E17F24A4", "TST", {"SIMON"} }, {"b02c8e79ed2e7f46", "LAPIN", {"BOB"} }, {"6bb4e95898c88011", "LAPINE", {"BOB"} }, {"cdc6b483874b875b", "GLOUGLOU", {"bob"} }, // put some low case in there, to make SURE the up case conversion works. {"ef1f9139db2d5279", "GLOUGLOUTER", {"bob"} }, // also these 2 make sure lower cased passwords 'match' the 'get_key' method in the format tests. {"c0ee5107c9a080c1", "AZERTYUIOP", {"BOB"} }, {"99e8b231d33772f9", "CANARDWC", {"BOB"} }, {"da3224126a67c8ed", "COUCOU_COUCOU", {"BOB"} }, {"ec8147abb3373d53", "LONG_MOT_DE_PASSE_OUI", {"BOB"} }, {NULL} }; #if ARCH_LITTLE_ENDIAN #define ENDIAN_SHIFT_L << 8 #define ENDIAN_SHIFT_R >> 8 #else #define ENDIAN_SHIFT_L #define ENDIAN_SHIFT_R #endif static UTF16 cur_salt[SALT_SIZE / 2 + PLAINTEXT_LENGTH]; static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1]; static char (*plain_key)[PLAINTEXT_LENGTH + 1]; static int (*key_length); static ARCH_WORD_32 (*crypt_key)[2]; static DES_key_schedule desschedule_static; static int salt_length; static int valid(char *ciphertext, struct fmt_main *self) { int i; int l; /* * 2 cases * 1 - it comes from the disk, and does not have O$ + salt * 2 - it comes from memory, and has got O$ + salt + # + blah */ if (strlen(ciphertext) > CIPHERTEXT_LENGTH + 3 + MAX_USERNAME_LEN * (options.input_enc == UTF_8 ? 
3 : 1)) return 0; if (!memcmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) { int len; char name[MAX_USERNAME_LEN + 1]; UTF16 name16[MAX_USERNAME_LEN + 1 + 1]; ciphertext += FORMAT_TAG_LEN; l = strlen(ciphertext) - CIPHERTEXT_LENGTH; if (l <= 0) return 0; if (ciphertext[l-1] != '#') return 0; strnzcpy(name, ciphertext, sizeof(name)); len = enc_to_utf16(name16, MAX_USERNAME_LEN + 1, (UTF8*)name, strlen(name)); if (len < 0) { static int error_shown = 0; #ifdef HAVE_FUZZ if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK)) return 0; #endif if (!error_shown) fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label); error_shown = 1; return 0; } if (len > MAX_USERNAME_LEN) return 0; } else { if(strlen(ciphertext)!=CIPHERTEXT_LENGTH) return 0; l = 0; } for (i = l; i < l + CIPHERTEXT_LENGTH; i++){ if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) || (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) || (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F')))) return 0; } return 1; } static char *prepare(char *split_fields[10], struct fmt_main *self) { char *cp; if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) return split_fields[1]; if (!split_fields[0]) return split_fields[1]; cp = mem_alloc(strlen(split_fields[0]) + strlen(split_fields[1]) + 4); sprintf (cp, "%s%s#%s", FORMAT_TAG, split_fields[0], split_fields[1]); if (valid(cp, self)) { UTF8 tmp8[MAX_USERNAME_LEN * 3 + 1]; int utf8len; // we no longer need this. It was just used for valid(). We will recompute // all lengths, after we do an upcase, since upcase can change the length of the // utf8 string. MEM_FREE(cp); // Upcase user name, --encoding aware utf8len = enc_uc(tmp8, sizeof(tmp8), (unsigned char*)split_fields[0], strlen(split_fields[0])); cp = mem_alloc_tiny(utf8len + strlen(split_fields[1]) + 4, MEM_ALIGN_NONE); sprintf (cp, "%s%s#%s", FORMAT_TAG, tmp8, split_fields[1]); #ifdef DEBUG_ORACLE printf ("tmp8 : %s\n", tmp8); #endif return cp; } MEM_FREE(cp); return split_fields[1]; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[FORMAT_TAG_LEN + sizeof(cur_salt) + 1 + CIPHERTEXT_LENGTH]; char *cp; strnzcpy(out, ciphertext, sizeof(out)); enc_strupper(&out[FORMAT_TAG_LEN]); cp = strrchr(out, '#'); if (cp) strlwr(cp); return out; } static void init(struct fmt_main *self) { DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule_static); #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif cur_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cur_key)); plain_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*plain_key)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); key_length = mem_calloc(self->params.max_keys_per_crypt, sizeof(*key_length)); } static void done(void) { MEM_FREE(key_length); MEM_FREE(crypt_key); MEM_FREE(plain_key); MEM_FREE(cur_key); } static void set_salt(void *salt) { salt_length = ((unsigned short *)salt)[0]; memcpy(cur_salt, &((unsigned short *)salt)[1], salt_length); } static void oracle_set_key(char *key, int index) { UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1]; UTF16 *c; strcpy(plain_key[index], key); // Can't use enc_to_utf16_be() because we need to do utf16_uc later key_length[index] = enc_to_utf16((UTF16 *)cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key)); if (key_length[index] < 0) key_length[index] = 
strlen16(cur_key_mixedcase); // We convert and uppercase in one shot key_length[index] = utf16_uc((UTF16 *)cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length[index]); // we have no way to 'undo' here, since the expansion is due to single-2-multi expansion in the upcase, // and we can not 'fix' our password. We simply have to 'not' properly decrypt this one, but protect ourselves. if (key_length[index] < 0) key_length[index] *= -1; // Now byte-swap to UTF16-BE c = cur_key[index]; while((*c = *c << 8 | *c >> 8)) c++; key_length[index] *= sizeof(UTF16); #ifdef DEBUG_ORACLE dump_stuff_msg("cur_key ", (unsigned char*)&cur_key[index][0], key_length[index]); #endif } static char *get_key(int index) { static UTF8 UC_Key[PLAINTEXT_LENGTH*3*3+1]; // Calling this will ONLY upcase characters 'valid' in the code page. There are MANY // code pages which mssql WILL upcase the letter (in UCS-2), but there is no upper case value // in the code page. Thus we MUST keep the lower cased letter in this case. enc_uc(UC_Key, sizeof(UC_Key), (UTF8*)plain_key[index], strlen(plain_key[index])); return (char*)UC_Key; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int idx = 0; #ifdef _OPENMP #pragma omp parallel for for (idx = 0; idx < count; idx++) #endif { unsigned char buf[sizeof(cur_salt)]; unsigned char buf2[SALT_SIZE + PLAINTEXT_LENGTH*2]; DES_key_schedule sched_local; unsigned int l; l = salt_length + key_length[idx]; memcpy(buf2, cur_salt, salt_length); memcpy(buf2 + salt_length, cur_key[idx], key_length[idx]); #ifdef DEBUG_ORACLE dump_stuff_msg("cur_salt ", buf2, salt_length+key_length[idx]); #endif crypt_key[idx][0] = 0; crypt_key[idx][1] = 0; DES_ncbc_encrypt(buf2, buf, l, &desschedule_static, (DES_cblock *) crypt_key[idx], DES_ENCRYPT); DES_set_key((DES_cblock *)crypt_key[idx], &sched_local); crypt_key[idx][0] = 0; crypt_key[idx][1] = 0; DES_ncbc_encrypt(buf2, buf, l, &sched_local, (DES_cblock *) crypt_key[idx], DES_ENCRYPT); #ifdef DEBUG_ORACLE dump_stuff_msg(" crypt_key ", (unsigned char*)&crypt_key[idx][0], 8); #endif } return count; } static void * get_binary(char *ciphertext) { static unsigned char *out3; int l; int i; if (!out3) out3 = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); l = strlen(ciphertext) - CIPHERTEXT_LENGTH; for(i=0;i<BINARY_SIZE;i++) { out3[i] = atoi16[ARCH_INDEX(ciphertext[i*2+l])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+l+1])]; } return out3; } static void * get_salt(char * ciphertext) { static UTF16 *out; UTF8 salt[SALT_SIZE + 1]; int l; if (!out) out = mem_alloc_tiny(SALT_SIZE+2, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE+2); ciphertext += FORMAT_TAG_LEN; l = 0; while( ciphertext[l] && (ciphertext[l]!='#') ) { salt[l] = ciphertext[l]; l++; if (l >= SALT_SIZE-2) break; } salt[l] = 0; // Encoding-aware shift to upper-case enc_strupper((char*)salt); l = enc_to_utf16_be(&out[1], MAX_USERNAME_LEN, (UTF8 *)salt, l); out[0] = (l<<1); return out; } // Public domain hash function by DJ Bernstein (salt is a username) static int salt_hash(void *salt) { UTF16 *s = ((UTF16*)salt) + 1; unsigned int hash = 5381; while (*s) hash = ((hash << 5) + hash) ^ *s++; return hash & (SALT_HASH_SIZE - 1); } static int get_hash_0(int idx) { return crypt_key[idx][0] & PH_MASK_0; } static int get_hash_1(int idx) { return crypt_key[idx][0] & PH_MASK_1; } static int get_hash_2(int idx) { return crypt_key[idx][0] & PH_MASK_2; } static int get_hash_3(int idx) { return crypt_key[idx][0] & PH_MASK_3; } static int get_hash_4(int idx) { return crypt_key[idx][0] & 
PH_MASK_4; } static int get_hash_5(int idx) { return crypt_key[idx][0] & PH_MASK_5; } static int get_hash_6(int idx) { return crypt_key[idx][0] & PH_MASK_6; } static int cmp_all(void *binary, int count) { int i; ARCH_WORD_32 b = *(ARCH_WORD_32*)binary; for (i = 0; i < count; ++i) if (b == *((ARCH_WORD_32*)(crypt_key[i])) ) return 1; return 0; } static int cmp_one(void *binary, int idx) { return !memcmp(binary, crypt_key[idx], sizeof(crypt_key[idx])); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_oracle = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, oracle_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
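/*
 * Algorithm summary (for reference; derived from crypt_all() above): the
 * Oracle 10 hash is a double DES-CBC MAC over the upper-cased, UTF-16BE
 * encoded username || password:
 *
 *     k1   = 0x0123456789abcdef                 (fixed key, desschedule_static)
 *     mac1 = last block of DES-CBC(k1,   iv=0, user||pass)
 *     mac2 = last block of DES-CBC(mac1, iv=0, user||pass)
 *     hash = mac2                               (the 8-byte binary compared)
 */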
shallow_water_variables_utility.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Miguel Maso Sotomayor // #if !defined(KRATOS_SHALLOW_WATER_VARIABLES_UTILITY_H_INCLUDED) #define KRATOS_SHALLOW_WATER_VARIABLES_UTILITY_H_INCLUDED // System includes #include "includes/define.h" #include "includes/model_part.h" #include "utilities/openmp_utils.h" #include "processes/find_nodal_neighbours_process.h" // External includes // Project includes #include "shallow_water_application.h" namespace Kratos { typedef std::size_t IndexType; typedef std::vector<IndexType> IndexVectorType; typedef std::map<int, Properties::Pointer> PropertiesMapType; class ShallowWaterVariablesUtility { public: KRATOS_CLASS_POINTER_DEFINITION(ShallowWaterVariablesUtility); ShallowWaterVariablesUtility(ModelPart& rModelPart, const double& rDryHeight = 1e-3) : mrModelPart(rModelPart) { KRATOS_TRY std::cout << "Initializing shallow water variables utility" << std::endl; mWaterHeightConvert = mrModelPart.GetProcessInfo()[WATER_HEIGHT_UNIT_CONVERTER]; mDryHeight = rDryHeight; mZeroValue = 1e-8; KRATOS_CATCH("") } ~ShallowWaterVariablesUtility() {} /** * This method computes the free surface elevation as HEIGHT + BATHYMETRY */ void ComputeFreeSurfaceElevation() { KRATOS_TRY const int nnodes = static_cast<int>(mrModelPart.Nodes().size()); ModelPart::NodesContainerType::iterator nodes_begin = mrModelPart.NodesBegin(); #pragma omp parallel for for(int i = 0; i < nnodes; i++) { auto node = nodes_begin + i; node->FastGetSolutionStepValue(FREE_SURFACE_ELEVATION) = node->FastGetSolutionStepValue(HEIGHT) + (node->FastGetSolutionStepValue(BATHYMETRY) / mWaterHeightConvert); } KRATOS_CATCH("") } /** * This method computes the water height as FREE_SURFACE_ELEVATION - BATHYMETRY */ void ComputeHeightFromFreeSurface() { KRATOS_TRY const int nnodes = static_cast<int>(mrModelPart.Nodes().size()); ModelPart::NodesContainerType::iterator nodes_begin = mrModelPart.NodesBegin(); #pragma omp parallel for for(int i = 0; i < nnodes; i++) { auto node = nodes_begin + i; node->FastGetSolutionStepValue(HEIGHT) = node->FastGetSolutionStepValue(FREE_SURFACE_ELEVATION) - (node->FastGetSolutionStepValue(BATHYMETRY) / mWaterHeightConvert); } KRATOS_CATCH("") } /** * This method computes the velocity as the MOMENTUM / HEIGHT */ void ComputeVelocity() { KRATOS_TRY const int nnodes = static_cast<int>(mrModelPart.Nodes().size()); ModelPart::NodesContainerType::iterator nodes_begin = mrModelPart.NodesBegin(); #pragma omp parallel for for(int i = 0; i < nnodes; i++) { auto node = nodes_begin + i; node->GetSolutionStepValue(VELOCITY) = node->FastGetSolutionStepValue(MOMENTUM) / (node->FastGetSolutionStepValue(HEIGHT) * mWaterHeightConvert); } KRATOS_CATCH("") } /** * This method computes the momentum as the VELOCITY * HEIGHT */ void ComputeMomentum() { KRATOS_TRY const int nnodes = static_cast<int>(mrModelPart.Nodes().size()); ModelPart::NodesContainerType::iterator nodes_begin = mrModelPart.NodesBegin(); #pragma omp parallel for for(int i = 0; i < nnodes; i++) { auto node = nodes_begin + i; node->GetSolutionStepValue(MOMENTUM) = node->FastGetSolutionStepValue(VELOCITY) * (node->FastGetSolutionStepValue(HEIGHT) * mWaterHeightConvert); } KRATOS_CATCH("") } /** * Set the dry nodes to zero */ void CheckDryConservedVariables() { KRATOS_TRY const int nnodes = static_cast<int>(mrModelPart.Nodes().size()); ModelPart::NodesContainerType::iterator 
nodes_begin = mrModelPart.NodesBegin();
        #pragma omp parallel for
        for(int i = 0; i < nnodes; i++)
        {
            auto node = nodes_begin + i;
            if (node->FastGetSolutionStepValue(HEIGHT) < mDryHeight &&
                node->FastGetSolutionStepValue(RAIN)   < mDryHeight)
            {
                node->FastGetSolutionStepValue(HEIGHT) = mZeroValue;
                node->FastGetSolutionStepValue(MOMENTUM_X) = 0;
                node->FastGetSolutionStepValue(MOMENTUM_Y) = 0;
            }
        }
        KRATOS_CATCH("")
    }

    /**
     * Set the dry nodes to zero
     */
    void CheckDryPrimitiveVariables()
    {
        KRATOS_TRY
        const int nnodes = static_cast<int>(mrModelPart.Nodes().size());
        ModelPart::NodesContainerType::iterator nodes_begin = mrModelPart.NodesBegin();
        #pragma omp parallel for
        for(int i = 0; i < nnodes; i++)
        {
            auto node = nodes_begin + i;
            if (node->FastGetSolutionStepValue(HEIGHT) < mDryHeight &&
                node->FastGetSolutionStepValue(RAIN)   < mDryHeight)
            {
                node->FastGetSolutionStepValue(HEIGHT) = mZeroValue;
                node->FastGetSolutionStepValue(VELOCITY_X) = 0;
                node->FastGetSolutionStepValue(VELOCITY_Y) = 0;
            }
        }
        KRATOS_CATCH("")
    }

    /**
     * This method looks for the dry elements and sets the proper flag
     */
    void SetDryWetState()
    {
        KRATOS_TRY
        // Getting the elements from the model
        const int nelem = static_cast<int>(mrModelPart.Elements().size());
        ModelPart::ElementsContainerType::iterator elem_begin = mrModelPart.ElementsBegin();
        // An element is deactivated only if all of its nodes are dry.
        // Note: the per-element locals are declared inside the loop so that
        // each OpenMP thread gets its own copy (sharing them across the
        // parallel for would be a data race).
        #pragma omp parallel for
        for(int k = 0; k < nelem; k++)
        {
            auto elem = elem_begin + k;
            int nodes = elem->GetGeometry().size(); // Number of element nodes
            bool wet_node = false;                  // The nodal flag
            for(int l = 0; l < nodes; l++)
            {
                if (elem->GetGeometry()[l].FastGetSolutionStepValue(HEIGHT) >= mDryHeight ||
                    elem->GetGeometry()[l].FastGetSolutionStepValue(RAIN)   >= mDryHeight)
                    wet_node = true; // There is at least one wet node
            }
            if (wet_node)
            {
                elem->Set(FLUID, true);
                elem->Set(ACTIVE, true);
            }
            else
            {
                elem->Set(FLUID, false);
                elem->Set(ACTIVE, false);
            }
        }
        KRATOS_CATCH("")
    }

    /**
     * This method creates the dry properties as a copy of the wet properties
     * The only difference between them is for visualization purposes
     */
    void DefineDryProperties()
    {
        // Create a copy of each property
        const int nprop = static_cast<int>(mrModelPart.NumberOfProperties());
        ModelPart::PropertiesContainerType::iterator prop_begin = mrModelPart.PropertiesBegin();
        IndexType last_id = 0;
        IndexVectorType prop_id;
        for (int i = 0; i < nprop; i++)
        {
            auto prop = prop_begin + i;
            if (prop->Id() > last_id) last_id = prop->Id();
            prop_id.push_back(prop->Id());
        }
        for (auto id : prop_id)
        {
            // Get pointers to the properties and create the dry property
            Properties::Pointer wet_prop = mrModelPart.pGetProperties(id); // This workaround is inefficient. TODO: find another way
            Properties::Pointer dry_prop(new Properties(*wet_prop));
            dry_prop->SetId(++last_id);
            // Add the new property and register both in the maps
            mrModelPart.AddProperties(dry_prop);
            mWetToDryPropertiesMap[wet_prop->Id()] = dry_prop;
            mDryToWetPropertiesMap[dry_prop->Id()] = wet_prop;
        }
    }

    /**
     * This method assigns the wet and dry properties
     * Wet and dry are twin properties
     * The only difference between them is for visualization purposes
     * ExecuteBeforeOutputStep
     * @see DefineDryProperties
     */
    void AssignDryWetProperties()
    {
        const int nelem = static_cast<int>(mrModelPart.Elements().size());
        ModelPart::ElementsContainerType::iterator elem_begin = mrModelPart.ElementsBegin();
        #pragma omp parallel for
        for (int i = 0; i < nelem; i++)
        {
            auto elem = elem_begin + i;
            if (elem->Is(FLUID))
            {
                auto search = mDryToWetPropertiesMap.find(elem->GetProperties().Id());
                if (search != mDryToWetPropertiesMap.end()) // The element was dry
                    elem->SetProperties(search->second);
            }
            else
            {
                auto search = mWetToDryPropertiesMap.find(elem->GetProperties().Id());
                if (search != mWetToDryPropertiesMap.end()) // The element was wet
                {
                    elem->SetProperties(search->second);
                }
            }
        }
    }

    /**
     * This method sets the mesh position for visualization purposes
     * ExecuteBeforeOutputStep
     * @see ResetMeshPosition
     */
    void SetMeshPosition()
    {
        // Move the mesh to the current position
        const int nodes = static_cast<int>(mrModelPart.Nodes().size());
        ModelPart::NodesContainerType::iterator node_begin = mrModelPart.NodesBegin();
        #pragma omp parallel for
        for(int i = 0; i < nodes; i++)
        {
            auto node = node_begin + i;
            if (node->FastGetSolutionStepValue(HEIGHT) <= mDryHeight)
            {
                double value = node->FastGetSolutionStepValue(BATHYMETRY);
                node->Z() = value;
                node->FastGetSolutionStepValue(DISPLACEMENT_Z) = value;
            }
            else
            {
                double value = node->FastGetSolutionStepValue(FREE_SURFACE_ELEVATION);
                node->Z() = value;
                node->FastGetSolutionStepValue(DISPLACEMENT_Z) = value;
            }
        }
    }

    /**
     * This method resets the mesh to the original position (Z0 = 0)
     * ExecuteAfterOutputStep
     * @see SetMeshPosition
     */
    void ResetMeshPosition()
    {
        // Move the mesh back to the original position
        const int nodes = static_cast<int>(mrModelPart.Nodes().size());
        ModelPart::NodesContainerType::iterator node_begin = mrModelPart.NodesBegin();
        #pragma omp parallel for
        for(int i = 0; i < nodes; i++)
        {
            auto node = node_begin + i;
            node->Z() = node->Z0();
        }
    }

    /**
     * This method sets all the elements active for visualization purposes
     * ExecuteBeforeOutputStep
     * @see AssignDryWetProperties
     * @see SetDryWetState
     */
    void SetElementsActive()
    {
        const int nelem = static_cast<int>(mrModelPart.Elements().size());
        ModelPart::ElementsContainerType::iterator elem_begin = mrModelPart.ElementsBegin();
        #pragma omp parallel for
        for (int i = 0; i < nelem; i++)
        {
            auto elem = elem_begin + i;
            elem->Set(ACTIVE, true);
        }
    }

    protected:

    private:

    ModelPart& mrModelPart;
    double mWaterHeightConvert;
    double mDryHeight;
    double mZeroValue;
    PropertiesMapType mWetToDryPropertiesMap;
    PropertiesMapType mDryToWetPropertiesMap;

}; // class ShallowWaterVariablesUtility

} // namespace Kratos

#endif // KRATOS_SHALLOW_WATER_VARIABLES_UTILITY_H_INCLUDED
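/*
 * A plain-C sketch of the SetDryWetState() logic above, stripped of the
 * Kratos containers (the flat arrays height, rain, elem_nodes, active and
 * the fixed 3-node triangles are assumptions for illustration): an element
 * stays active when at least one of its nodes is wet.  The per-element
 * locals live inside the loop body, so the OpenMP threads cannot race on
 * them.
 */
#include <stdbool.h>

void set_dry_wet_state(long nelem, const int (*elem_nodes)[3],
                       const double *height, const double *rain,
                       double dry_height, bool *active)
{
    long k;
    #pragma omp parallel for
    for (k = 0; k < nelem; k++) {
        bool wet = false;                 /* private to each iteration */
        for (int l = 0; l < 3; l++) {
            int n = elem_nodes[k][l];
            if (height[n] >= dry_height || rain[n] >= dry_height)
                wet = true;               /* at least one wet node */
        }
        active[k] = wet;
    }
}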
GB_unaryop__abs_bool_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_int64 // op(A') function: GB_tran__abs_bool_int64 // C type: bool // A type: int64_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_int64 ( bool *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
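/*
 * A hand-expanded sketch (for illustration only; the real kernel above stays
 * macro-generated) of what GB_unop__abs_bool_int64 reduces to once GB_GETA,
 * GB_CASTING and GB_OP are substituted: because the output type is bool, the
 * ABS operator degenerates to an identity, and the whole kernel is just an
 * int64_t -> bool typecast applied in parallel.
 */
#include <stdbool.h>
#include <stdint.h>

static void unop_abs_bool_int64_expanded(bool *restrict Cx,
    const int64_t *restrict Ax, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;      /* GB_GETA: aij = Ax [pA] */
        bool x = (bool) aij ;       /* GB_CASTING: nonzero maps to true */
        Cx [p] = x ;                /* GB_OP: identity */
    }
}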
GrB_Monoid_wait.c
//------------------------------------------------------------------------------ // GrB_Monoid_wait: wait for a user-defined GrB_Monoid to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_Monoid has no pending // operations to wait for. All this method does is verify that the monoid is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GrB_Monoid_wait // no work, just check if the GrB_Monoid is valid ( GrB_Monoid *monoid ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE1 ("GrB_Monoid_wait (&monoid)") ; GB_RETURN_IF_NULL (monoid) ; GB_RETURN_IF_NULL_OR_FAULTY (*monoid) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
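/*
 * A minimal usage sketch (an illustration, not part of this translation
 * unit): build a monoid and wait on it.  Because a monoid carries no pending
 * work, the call only validates the object and issues the OpenMP flushes
 * shown above.  This uses the v1.x-era signature defined in this file, which
 * takes a pointer to the monoid; error checking is omitted for brevity.
 */
#include "GraphBLAS.h"

void monoid_wait_example (void)
{
    GrB_Monoid plus ;
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Monoid_new_FP64 (&plus, GrB_PLUS_FP64, (double) 0) ;  // identity 0
    GrB_Monoid_wait (&plus) ;   // no-op apart from the checks and the flush
    GrB_Monoid_free (&plus) ;
    GrB_finalize ( ) ;
}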
declare-variant-4.c
double f1 (int, long, float); double f2 (int, long, float); double f3 (int, long, float); double f4 (int, long, float); double f5 (int, long, float); #pragma omp declare variant (f1) match (user={condition(1)}) #pragma omp declare variant (f2) match (user={condition(score(1):1)}) #pragma omp declare variant (f3) match (user={condition(score(3):1)}) #pragma omp declare variant (f4) match (user={condition(score(2):1)}) #pragma omp declare variant (f5) match (implementation={vendor(gnu)}) double f6 (int x, long y, float z) { return z + x + y; } double test (int x) { return f6 (x, x, 3.5f); }
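/* A self-contained sketch of the scoring rule the test above exercises (the
   helper names v1..v3 and the main() driver are illustrative additions, not
   part of the test): when several variants in the user set match, the one
   with the highest score is selected, so base() below should dispatch to v2.
   Which of f1..f5 wins in the original test additionally depends on the
   implicit scores given to unscored and implementation-set selectors, so
   this sketch sticks to explicit user scores.  Requires a compiler with
   OpenMP 5.0 declare variant support (e.g. GCC 10+), built with -fopenmp. */
#include <stdio.h>

static double v1 (int x, long y, float z) { return 1.0; }
static double v2 (int x, long y, float z) { return 2.0; }
static double v3 (int x, long y, float z) { return 3.0; }

#pragma omp declare variant (v1) match (user={condition(score(1):1)})
#pragma omp declare variant (v2) match (user={condition(score(3):1)})
#pragma omp declare variant (v3) match (user={condition(score(2):1)})
static double base (int x, long y, float z) { return 0.0; }

int main (void)
{
  printf ("selected variant returned %g\n", base (1, 2L, 3.5f)); /* expect 2 */
  return 0;
}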
pmv-OpenMP-reduction_atcgrid.c
#include <stdlib.h> // library with atoi(), malloc() and free()
#include <stdio.h>  // library providing printf()
#include <time.h>   // time() and time_t, used to seed rand()
#ifdef _OPENMP
#include <omp.h>
#else
// Valid no-op fallbacks for builds without OpenMP (the original macros were
// malformed); omp_get_wtime() is also used below, so stub it out too.
#define omp_set_dynamic(x)
#define omp_set_num_threads(x)
#define omp_get_wtime() 0.0
#endif

int main(int argc, char ** argv){
	int **M;
	int *v1, *v2;
	int i, k, N;
	double cgt1, cgt2, ncgt; // for the execution time
	time_t t;

	// Seed for rand()
	srand((unsigned) time(&t));

	// Get the number of rows x columns of the square matrix
	if(argc < 2){
		fprintf(stderr,"Missing iteration count\n");
		exit(-1);
	}
	N = atoi(argv[1]);

	// == Memory allocation
	// ====================================================>
	v1 = (int *) malloc(N*sizeof(int));
	v2 = (int *) malloc(N*sizeof(int));
	if ( v1 == NULL || v2 == NULL ){
		printf("Error allocating space for the vectors\n");
		exit(-2);
	}
	M = (int**) malloc (N*sizeof(int*));
	// Listing i as private in a for gives each thread its own copy of i; in a
	// parallel for each thread takes iterations such as i = 0, i = 3, i = 6
	// for a loop with N = 9
	#pragma omp parallel for shared(M,N) private(i) default(none)
	for(i = 0; i<N; i++){
		M[i] = (int*) malloc (N*sizeof(int));
		if( M[i] == NULL ){
			printf("Error allocating space for the vectors\n");
			exit(-2);
		}
	}

	// == Initialization
	// ====================================================>
	// M, v1, v2, N and i are shared
	// Each thread handles a part of the loop using i
	// k is private
	// The outer loop over i stays sequential so that each thread computing
	// part of the i-th row in the inner loop has its own copy of k from 0
	for(i = 0; i<N; i++){
		#pragma omp parallel for shared(M,i,N) private(k) default(none)
		for(k = 0; k<N; k++)
			M[i][k] = rand() % 8;
	}
	#pragma omp parallel for shared(v1,v2,N) private(i) default(none)
	for(i = 0; i<N; i++){
		v1[i] = rand() % 6;
		v2[i] = 0;
	}

	// == Computation
	// ====================================================>
	cgt1 = omp_get_wtime();
	// The result vector v2 is left shared so every thread can reach it
	// without a local copy; each row's partial sum is accumulated in the
	// reduction variable sumalocal, so v2 itself is written exactly once per
	// row and no atomic accesses are needed.
	int sumalocal;
	for(i = 0; i<N; i++){
		sumalocal = 0;
		#pragma omp parallel for reduction(+:sumalocal) shared(M,i,N,v2,v1) private(k) default(none)
		for(k = 0; k<N; k++)
			sumalocal += M[i][k] * v1[k];
		v2[i] = sumalocal;
	}
	cgt2 = omp_get_wtime();
	ncgt = (double)(cgt2 - cgt1);

	// == Print messages
	// ====================================================>
	printf("Time(sec.):%11.9f\n", ncgt);
	printf("Size of the vectors: %d\n", N);
	printf("\tv1 = %dElem -> %lu bytes\n\tv2 = %dElem -> %lu bytes\n", N, N*sizeof(int), N, N*sizeof(int));
	printf("Size of the matrix: %dx%d -> %lu bytes\n", N, N, N*N*sizeof(int));
	// Printing the first and last components of the result keeps the
	// compiler optimizer from eliminating the summation code.
	printf("v2[0] = %d ... v2[N-1] = %d \n", v2[0], v2[N-1]);
	// For small sizes N < 15, show the computed values
	if(N < 15){
		printf("\n----------- Matrix M ----------- \n");
		for(i = 0; i<N; i++){
			for(k = 0; k<N; k++)
				printf("%d\t", M[i][k]);
			printf("\n");
		}
		printf("\n----------- Vector V1 ----------- \n");
		for(i = 0; i<N; i++)
			printf("%d\t", v1[i]);
		printf("\n");
		printf("\n----------- Vector V2 ----------- \n");
		for(i = 0; i<N; i++)
			printf("%d\t", v2[i]);
		printf("\n");
	}

	// == Free memory
	// ====================================================>
	free(v1);
	free(v2);
	#pragma omp parallel for shared(M,N) private(i) default(none)
	for(i = 0; i<N; i++)
		free(M[i]);
	free(M);
}
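/*
 * For comparison with the inner-loop reduction used above, a sketch (added
 * for illustration; the function name and placement are not part of the
 * original program) of the more common outer-loop decomposition: each thread
 * takes whole rows, so every v2[i] is written by exactly one thread, no
 * reduction is needed, and the parallel region is entered once per product
 * instead of once per row.
 */
void matvec_rows(int **M, int *v1, int *v2, int N)
{
	int i, k;
	#pragma omp parallel for shared(M, v1, v2, N) private(i, k) default(none)
	for(i = 0; i < N; i++){
		int sum = 0; // private running sum for row i
		for(k = 0; k < N; k++)
			sum += M[i][k] * v1[k];
		v2[i] = sum;
	}
}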
streamcluster_omp.c
/*********************************************** streamcluster_omp.cpp : parallelized code of streamcluster using OpenMP - original code from PARSEC Benchmark Suite - parallelization with OpenMP API has been applied by Sang-Ha (a.k.a Shawn) Lee - sl4ge@virginia.edu University of Virginia Department of Electrical and Computer Engineering Department of Computer Science - modified for Nautilus testing by pdinda@northwestern.edu ***********************************************/ #include <nautilus/nautilus.h> #include <nautilus/scheduler.h> #include <nautilus/libccompat.h> #include <nautilus/naut_string.h> #include <rt/omp/omp.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif //eliminate C++ as much as possible //using namespace std; typedef int bool; #define pthread_barrier_t int #define pthread_t int #define calloc(n,s) ({ void *_p=malloc(n*s); memset(_p,0,n*s); _p; }) #define new(t) calloc(sizeof(t),1) #define newa(t,n) calloc(sizeof(t),n) #define del(t) free(t) #define dela(t) free(t) #define MAXNAMESIZE 1024 // max filename length #define SEED 1 /* increase this to reduce probability of random error */ /* increasing it also ups running time of "speedy" part of the code */ /* SP = 1 seems to be fine */ #define SP 1 // number of repetitions of speedy must be >=1 /* higher ITER --> more likely to get correct # of centers */ /* higher ITER also scales the running time almost linearly */ #define ITER 3 // iterate ITER* k log k times; ITER >= 1 //#define PRINTINFO //comment this out to disable output #define PROFILE // comment this out to disable instrumentation code //#define ENABLE_THREADS // comment this out to disable threads //#define INSERT_WASTE //uncomment this to insert waste computation into dist function #define CACHE_LINE 512 // cache line in byte /* this structure represents a point */ /* these will be passed around to avoid copying coordinates */ typedef struct { float weight; float *coord; long assign; /* number of point where this one is assigned */ float cost; /* cost of that assignment, weight*distance */ } Point; /* this is the array of points */ typedef struct { long num; /* number of points; may not be N if this is a sample */ int dim; /* dimensionality */ Point *p; /* the array itself */ } Points; static bool *switch_membership; //whether to switch membership in pgain static bool* is_center; //whether a point is a center static int* center_table; //index table of centers float* block; static int nproc; //# of threads static int c, d; static int ompthreads; // instrumentation code #ifdef PROFILE static double time_local_search; static double time_speedy; static double time_select_feasible; static double time_gain; static double time_shuffle; static double time_gain_dist; static double time_gain_init; #endif static double gettime() { // struct timeval t; // gettimeofday(&t,NULL); //return (double)t.tv_sec+t.tv_usec*1e-6; return ((double)(nk_sched_get_realtime()))/1e9; } static int isIdentical(float *i, float *j, int D) // tells whether two points of D dimensions are identical { int a = 0; int equal = 1; while (equal && a < D) { if (i[a] != j[a]) equal = 0; else a++; } if (equal) return 1; else return 0; } /* comparator for floating point numbers */ static int floatcomp(const void *i, const void *j) { float a, b; a = *(float *)(i); b = *(float *)(j); if (a > b) return (1); if (a < b) return (-1); return(0); } /* shuffle points into random order */ static void shuffle(Points *points) { #ifdef PROFILE double t1 = gettime(); #endif long i, j; Point temp; for 
(i=0;i<points->num-1;i++) { j=(lrand48()%(points->num - i)) + i; temp = points->p[i]; points->p[i] = points->p[j]; points->p[j] = temp; } #ifdef PROFILE double t2 = gettime(); time_shuffle += t2-t1; #endif } /* shuffle an array of integers */ static void intshuffle(int *intarray, int length) { #ifdef PROFILE double t1 = gettime(); #endif long i, j; int temp; for (i=0;i<length;i++) { j=(lrand48()%(length - i))+i; temp = intarray[i]; intarray[i]=intarray[j]; intarray[j]=temp; } #ifdef PROFILE double t2 = gettime(); time_shuffle += t2-t1; #endif } #ifdef INSERT_WASTE static double waste(double s ) { for( int i =0 ; i< 4; i++ ) { s += pow(s,0.78); } return s; } #endif /* compute Euclidean distance squared between two points */ static float dist(Point p1, Point p2, int dim) { int i; float result=0.0; for (i=0;i<dim;i++) result += (p1.coord[i] - p2.coord[i])*(p1.coord[i] - p2.coord[i]); #ifdef INSERT_WASTE double s = waste(result); result += s; result -= s; #endif return(result); } /* run speedy on the points, return total cost of solution */ static float pspeedy(Points *points, float z, long *kcenter, int pid, pthread_barrier_t* barrier) { #ifdef PROFILE double t1 = gettime(); #endif #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif //my block long bsize = points->num/nproc; long k1 = bsize * pid; long k2 = k1 + bsize; if( pid == nproc-1 ) k2 = points->num; static double totalcost; static bool open = false; static double* costs; //cost for each thread. static int i; #ifdef ENABLE_THREADS static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; #endif #ifdef PRINTINFO if( pid == 0 ){ fprintf(stderr, "Speedy: facility cost %lf\n", z); } #endif /* create center at first point, send it to itself */ for( int k = k1; k < k2; k++ ) { float distance = dist(points->p[k],points->p[0],points->dim); points->p[k].cost = distance * points->p[k].weight; points->p[k].assign=0; } if( pid==0 ) { *kcenter = 1; costs = (double*)malloc(sizeof(double)*nproc); } if( pid != 0 ) { // we are not the master threads. we wait until a center is opened. while(1) { #ifdef ENABLE_THREADS pthread_mutex_lock(&mutex); while(!open) pthread_cond_wait(&cond,&mutex); pthread_mutex_unlock(&mutex); #endif if( i >= points->num ) break; for( int k = k1; k < k2; k++ ) { float distance = dist(points->p[i],points->p[k],points->dim); if( distance*points->p[k].weight < points->p[k].cost ) { points->p[k].cost = distance * points->p[k].weight; points->p[k].assign=i; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); pthread_barrier_wait(barrier); #endif } } else { // I am the master thread. I decide whether to open a center and notify others if so. 
for(i = 1; i < points->num; i++ ) { bool to_open = ((float)lrand48()/(float)INT_MAX)<(points->p[i].cost/z); if( to_open ) { (*kcenter)++; #ifdef ENABLE_THREADS pthread_mutex_lock(&mutex); #endif open = true; #ifdef ENABLE_THREADS pthread_mutex_unlock(&mutex); pthread_cond_broadcast(&cond); #endif for( int k = k1; k < k2; k++ ) { float distance = dist(points->p[i],points->p[k],points->dim); if( distance*points->p[k].weight < points->p[k].cost ) { points->p[k].cost = distance * points->p[k].weight; points->p[k].assign=i; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif open = false; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif } } #ifdef ENABLE_THREADS pthread_mutex_lock(&mutex); #endif open = true; #ifdef ENABLE_THREADS pthread_mutex_unlock(&mutex); pthread_cond_broadcast(&cond); #endif } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif open = false; double mytotal = 0; for( int k = k1; k < k2; k++ ) { mytotal += points->p[k].cost; } costs[pid] = mytotal; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif // aggregate costs from each thread if( pid == 0 ) { totalcost=z*(*kcenter); for( int i = 0; i < nproc; i++ ) { totalcost += costs[i]; } free(costs); } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif #ifdef PRINTINFO if( pid == 0 ) { fprintf(stderr, "Speedy opened %d facilities for total cost %lf\n", *kcenter, totalcost); fprintf(stderr, "Distance Cost %lf\n", totalcost - z*(*kcenter)); } #endif #ifdef PROFILE double t2 = gettime(); if( pid== 0 ) { time_speedy += t2 -t1; } #endif return(totalcost); } /* For a given point x, find the cost of the following operation: * -- open a facility at x if there isn't already one there, * -- for points y such that the assignment distance of y exceeds dist(y, x), * make y a member of x, * -- for facilities y such that reassigning y and all its members to x * would save cost, realize this closing and reassignment. * * If the cost of this operation is negative (i.e., if this entire operation * saves cost), perform this operation and return the amount of cost saved; * otherwise, do nothing. */ /* numcenters will be updated to reflect the new number of centers */ /* z is the facility cost, x is the number of this point in the array points */ static double pgain(long x, Points *points, double z, long int *numcenters, int pid, pthread_barrier_t* barrier) { // printf("pgain pthread %d begin\n",pid); #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif #ifdef PROFILE double t0 = gettime(); #endif //my block long bsize = points->num/nproc; long k1 = bsize * pid; long k2 = k1 + bsize; if( pid == nproc-1 ) k2 = points->num; int i; int number_of_centers_to_close = 0; static double *work_mem; static double gl_cost_of_opening_x; static int gl_number_of_centers_to_close; //each thread takes a block of working_mem. int stride = *numcenters+2; //make stride a multiple of CACHE_LINE int cl = CACHE_LINE/sizeof(double); if( stride % cl != 0 ) { stride = cl * ( stride / cl + 1); } int K = stride -2 ; // K==*numcenters //my own cost of opening x double cost_of_opening_x = 0; if( pid==0 ) { work_mem = (double*) malloc(stride*(nproc+1)*sizeof(double)); gl_cost_of_opening_x = 0; gl_number_of_centers_to_close = 0; } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif /*For each center, we have a *lower* field that indicates how much we will save by closing the center. Each thread has its own copy of the *lower* fields as an array. We first build a table to index the positions of the *lower* fields. 
*/ int count = 0; for( int i = k1; i < k2; i++ ) { if( is_center[i] ) { center_table[i] = count++; } } work_mem[pid*stride] = count; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif if( pid == 0 ) { int accum = 0; for( int p = 0; p < nproc; p++ ) { int tmp = (int)work_mem[p*stride]; work_mem[p*stride] = accum; accum += tmp; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif for( int i = k1; i < k2; i++ ) { if( is_center[i] ) { center_table[i] += (int)work_mem[pid*stride]; } } //now we finish building the table. clear the working memory. memset(switch_membership + k1, 0, (k2-k1)*sizeof(bool)); memset(work_mem+pid*stride, 0, stride*sizeof(double)); if( pid== 0 ) memset(work_mem+nproc*stride,0,stride*sizeof(double)); #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif #ifdef PROFILE double t1 = gettime(); if( pid == 0 ) time_gain_init += t1-t0; #endif //my *lower* fields double* lower = &work_mem[pid*stride]; //global *lower* fields double* gl_lower = &work_mem[nproc*stride]; // OpenMP parallelization // #pragma omp parallel for #pragma omp parallel for reduction(+: cost_of_opening_x) for ( i = k1; i < k2; i++ ) { float x_cost = dist(points->p[i], points->p[x], points->dim) * points->p[i].weight; float current_cost = points->p[i].cost; if ( x_cost < current_cost ) { // point i would save cost just by switching to x // (note that i cannot be a median, // or else dist(p[i], p[x]) would be 0) switch_membership[i] = 1; cost_of_opening_x += x_cost - current_cost; } else { // cost of assigning i to x is at least current assignment cost of i // consider the savings that i's **current** median would realize // if we reassigned that median and all its members to x; // note we've already accounted for the fact that the median // would save z by closing; now we have to subtract from the savings // the extra cost of reassigning that median and its members int assign = points->p[i].assign; lower[center_table[assign]] += current_cost - x_cost; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif #ifdef PROFILE double t2 = gettime(); if( pid==0){ time_gain_dist += t2 - t1; } #endif // at this time, we can calculate the cost of opening a center // at x; if it is negative, we'll go through with opening it for ( int i = k1; i < k2; i++ ) { if( is_center[i] ) { double low = z; //aggregate from all threads for( int p = 0; p < nproc; p++ ) { low += work_mem[center_table[i]+p*stride]; } gl_lower[center_table[i]] = low; //printf("%d : %f %f\n", i, low, work_mem[center_table[i]+stride]); if ( low > 0 ) { // i is a median, and // if we were to open x (which we still may not) we'd close i // note, we'll ignore the following quantity unless we do open x ++number_of_centers_to_close; cost_of_opening_x -= low; } } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif //use the rest of working memory to store the following work_mem[pid*stride + K] = number_of_centers_to_close; work_mem[pid*stride + K+1] = cost_of_opening_x; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif // printf("thread %d cost complete\n",pid); if( pid==0 ) { gl_cost_of_opening_x = z; //aggregate for( int p = 0; p < nproc; p++ ) { gl_number_of_centers_to_close += (int)work_mem[p*stride + K]; gl_cost_of_opening_x += work_mem[p*stride+K+1]; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif // Now, check whether opening x would save cost; if so, do it, and // otherwise do nothing if ( gl_cost_of_opening_x < 0 ) { // we'd save money by opening x; we'll do it #pragma omp parallel 
for for ( int i = k1; i < k2; i++ ) { bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ; if ( switch_membership[i] || close_center ) { // Either i's median (which may be i itself) is closing, // or i is closer to x than to its current median points->p[i].cost = points->p[i].weight * dist(points->p[i], points->p[x], points->dim); points->p[i].assign = x; } } for( int i = k1; i < k2; i++ ) { if( is_center[i] && gl_lower[center_table[i]] > 0 ) { is_center[i] = false; } } if( x >= k1 && x < k2 ) { is_center[x] = true; } // pthread_barrier_wait(barrier); if( pid==0 ) { *numcenters = *numcenters + 1 - gl_number_of_centers_to_close; } } else { if( pid==0 ) gl_cost_of_opening_x = 0; // the value we'll return } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif if( pid == 0 ) { free(work_mem); // free(is_center); // free(switch_membership); // free(proc_cost_of_opening_x); // free(proc_number_of_centers_to_close); } #ifdef PROFILE double t3 = gettime(); if( pid==0 ) time_gain += t3-t0; #endif //printf("cost=%f\n", -gl_cost_of_opening_x); return -gl_cost_of_opening_x; } /* facility location on the points using local search */ /* z is the facility cost, returns the total cost and # of centers */ /* assumes we are seeded with a reasonable solution */ /* cost should represent this solution's cost */ /* halt if there is < e improvement after iter calls to gain */ /* feasible is an array of numfeasible points which may be centers */ static float pFL(Points *points, int *feasible, int numfeasible, float z, long *k, double cost, long iter, float e, int pid, pthread_barrier_t* barrier) { #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif long i; long x; double change; long numberOfPoints; change = cost; /* continue until we run iter iterations without improvement */ /* stop instead if improvement is less than e */ while (change/cost > 1.0*e) { change = 0.0; numberOfPoints = points->num; /* randomize order in which centers are considered */ if( pid == 0 ) { intshuffle(feasible, numfeasible); } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif for (i=0;i<iter;i++) { x = i%numfeasible; //printf("iteration %d started********\n", i); change += pgain(feasible[x], points, z, k, pid, barrier); c++; //printf("iteration %d finished @@@@@@\n", i); } cost -= change; #ifdef PRINTINFO if( pid == 0 ) { fprintf(stderr, "%d centers, cost %lf, total distance %lf\n", *k, cost, cost - z*(*k)); } #endif #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif } return(cost); } static int selectfeasible_fast(Points *points, int **feasible, int kmin, int pid, pthread_barrier_t* barrier) { #ifdef PROFILE double t1 = gettime(); #endif int numfeasible = points->num; if (numfeasible > (ITER*kmin*log((double)kmin))) numfeasible = (int)(ITER*kmin*log((double)kmin)); *feasible = (int *)malloc(numfeasible*sizeof(int)); float* accumweight; float totalweight; /* Calcuate my block. For now this routine does not seem to be the bottleneck, so it is not parallelized. When necessary, this can be parallelized by setting k1 and k2 to proper values and calling this routine from all threads ( it is called only by thread 0 for now ). Note that when parallelized, the randomization might not be the same and it might not be difficult to measure the parallel speed-up for the whole program. 
*/ // long bsize = numfeasible; long k1 = 0; long k2 = numfeasible; float w; int l,r,k; /* not many points, all will be feasible */ if (numfeasible == points->num) { for (int i=k1;i<k2;i++) (*feasible)[i] = i; return numfeasible; } accumweight= (float*)malloc(sizeof(float)*points->num); accumweight[0] = points->p[0].weight; totalweight=0; for( int i = 1; i < points->num; i++ ) { accumweight[i] = accumweight[i-1] + points->p[i].weight; } totalweight=accumweight[points->num-1]; for(int i=k1; i<k2; i++ ) { w = (lrand48()/(float)INT_MAX)*totalweight; //binary search l=0; r=points->num-1; if( accumweight[0] > w ) { (*feasible)[i]=0; continue; } while( l+1 < r ) { k = (l+r)/2; if( accumweight[k] > w ) { r = k; } else { l=k; } } (*feasible)[i]=r; } free(accumweight); #ifdef PROFILE double t2 = gettime(); time_select_feasible += t2-t1; #endif return numfeasible; } /* compute approximate kmedian on the points */ static float pkmedian(Points *points, long kmin, long kmax, long* kfinal, int pid, pthread_barrier_t* barrier ) { int i; double cost; double lastcost; double hiz, loz, z; static long k; static int *feasible; static int numfeasible; static double* hizs; if( pid==0 ) hizs = (double*)calloc(nproc,sizeof(double)); hiz = loz = 0.0; long numberOfPoints = points->num; long ptDimension = points->dim; //my block long bsize = points->num/nproc; long k1 = bsize * pid; long k2 = k1 + bsize; if( pid == nproc-1 ) k2 = points->num; #ifdef PRINTINFO if( pid == 0 ) { printf("Starting Kmedian procedure\n"); printf("%i points in %i dimensions\n", numberOfPoints, ptDimension); } #endif #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif double myhiz = 0; for (long kk=k1;kk < k2; kk++ ) { myhiz += dist(points->p[kk], points->p[0], ptDimension)*points->p[kk].weight; } hizs[pid] = myhiz; #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif for( int i = 0; i < nproc; i++ ) { hiz += hizs[i]; } loz=0.0; z = (hiz+loz)/2.0; /* NEW: Check whether more centers than points! 
*/ if (points->num <= kmax) { /* just return all points as facilities */ for (long kk=k1;kk<k2;kk++) { points->p[kk].assign = kk; points->p[kk].cost = 0; } cost = 0; if( pid== 0 ) { free(hizs); *kfinal = k; } return cost; } if( pid == 0 ) shuffle(points); cost = pspeedy(points, z, &k, pid, barrier); #ifdef PRINTINFO if( pid == 0 ) printf("thread %d: Finished first call to speedy, cost=%lf, k=%i\n",pid,cost,k); #endif i=0; /* give speedy SP chances to get at least kmin/2 facilities */ while ((k < kmin)&&(i<SP)) { cost = pspeedy(points, z, &k, pid, barrier); i++; } #ifdef PRINTINFO if( pid==0) printf("thread %d: second call to speedy, cost=%lf, k=%d\n",pid,cost,k); #endif /* if still not enough facilities, assume z is too high */ while (k < kmin) { #ifdef PRINTINFO if( pid == 0 ) { printf("%lf %lf\n", loz, hiz); printf("Speedy indicates we should try lower z\n"); } #endif if (i >= SP) {hiz=z; z=(hiz+loz)/2.0; i=0;} if( pid == 0 ) shuffle(points); cost = pspeedy(points, z, &k, pid, barrier); i++; } /* now we begin the binary search for real */ /* must designate some points as feasible centers */ /* this creates more consistancy between FL runs */ /* helps to guarantee correct # of centers at the end */ if( pid == 0 ) { numfeasible = selectfeasible_fast(points,&feasible,kmin,pid,barrier); for( int i = 0; i< points->num; i++ ) { is_center[points->p[i].assign]= true; } } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif while(1) { d++; #ifdef PRINTINFO if( pid==0 ) { printf("loz = %lf, hiz = %lf\n", loz, hiz); printf("Running Local Search...\n"); } #endif /* first get a rough estimate on the FL solution */ // pthread_barrier_wait(barrier); lastcost = cost; cost = pFL(points, feasible, numfeasible, z, &k, cost, (long)(ITER*kmax*log((double)kmax)), 0.1, pid, barrier); /* if number of centers seems good, try a more accurate FL */ if (((k <= (1.1)*kmax)&&(k >= (0.9)*kmin))|| ((k <= kmax+2)&&(k >= kmin-2))) { #ifdef PRINTINFO if( pid== 0) { printf("Trying a more accurate local search...\n"); } #endif /* may need to run a little longer here before halting without improvement */ cost = pFL(points, feasible, numfeasible, z, &k, cost, (long)(ITER*kmax*log((double)kmax)), 0.001, pid, barrier); } if (k > kmax) { /* facilities too cheap */ /* increase facility cost and up the cost accordingly */ loz = z; z = (hiz+loz)/2.0; cost += (z-loz)*k; } if (k < kmin) { /* facilities too expensive */ /* decrease facility cost and reduce the cost accordingly */ hiz = z; z = (hiz+loz)/2.0; cost += (z-hiz)*k; } /* if k is good, return the result */ /* if we're stuck, just give up and return what we have */ if (((k <= kmax)&&(k >= kmin))||((loz >= (0.999)*hiz)) ) { break; } #ifdef ENABLE_THREADS pthread_barrier_wait(barrier); #endif } //clean up... 
if( pid==0 ) { free(feasible); free(hizs); *kfinal = k; } return cost; } /* compute the means for the k clusters */ static int contcenters(Points *points) { long i, ii; float relweight; for (i=0;i<points->num;i++) { /* compute relative weight of this point to the cluster */ if (points->p[i].assign != i) { relweight=points->p[points->p[i].assign].weight + points->p[i].weight; relweight = points->p[i].weight/relweight; for (ii=0;ii<points->dim;ii++) { points->p[points->p[i].assign].coord[ii]*=1.0-relweight; points->p[points->p[i].assign].coord[ii]+= points->p[i].coord[ii]*relweight; } points->p[points->p[i].assign].weight += points->p[i].weight; } } return 0; } /* copy centers from points to centers */ static void copycenters(Points *points, Points* centers, long* centerIDs, long offset) { long i; long k; bool *is_a_median = (bool *) calloc(points->num, sizeof(bool)); /* mark the centers */ for ( i = 0; i < points->num; i++ ) { is_a_median[points->p[i].assign] = 1; } k=centers->num; /* count how many */ for ( i = 0; i < points->num; i++ ) { if ( is_a_median[i] ) { memcpy( centers->p[k].coord, points->p[i].coord, points->dim * sizeof(float)); centers->p[k].weight = points->p[i].weight; centerIDs[k] = i + offset; k++; } } centers->num = k; free(is_a_median); } typedef struct pkmedian_arg_t_ { Points* points; long kmin; long kmax; long* kfinal; int pid; pthread_barrier_t* barrier; } pkmedian_arg_t; static void* localSearchSub(void* arg_) { pkmedian_arg_t* arg= (pkmedian_arg_t*)arg_; pkmedian(arg->points,arg->kmin,arg->kmax,arg->kfinal,arg->pid,arg->barrier); return NULL; } static void localSearch( Points* points, long kmin, long kmax, long* kfinal ) { #ifdef PROFILE double t1 = gettime(); #endif pthread_barrier_t barrier; #ifdef ENABLE_THREADS pthread_barrier_init(&barrier,NULL,nproc); #endif pthread_t* threads = newa(pthread_t,nproc); pkmedian_arg_t* arg = newa(pkmedian_arg_t,nproc); for( int i = 0; i < nproc; i++ ) { arg[i].points = points; arg[i].kmin = kmin; arg[i].kmax = kmax; arg[i].pid = i; arg[i].kfinal = kfinal; arg[i].barrier = &barrier; #ifdef ENABLE_THREADS pthread_create(threads+i,NULL,localSearchSub,(void*)&arg[i]); #else localSearchSub(&arg[0]); #endif } for ( int i = 0; i < nproc; i++) { #ifdef ENABLE_THREADS pthread_join(threads[i],NULL); #endif } dela(threads); dela(arg); #ifdef ENABLE_THREADS pthread_barrier_destroy(&barrier); #endif #ifdef PROFILE double t2 = gettime(); time_local_search += t2-t1; #endif } typedef struct _PStream { long n; } PStream; static PStream *PStream_new(long n) { PStream *p = calloc(sizeof(PStream),1); p->n = n; return p; } static size_t PStream_read(PStream *p, float *dest, int dim, int num) { size_t count = 0; for( int i = 0; i < num && p->n > 0; i++ ) { for( int k = 0; k < dim; k++ ) { dest[i*dim + k] = lrand48()/(float)INT_MAX; } p->n--; count++; } return count; } static int PStream_ferror(PStream *p) { return 0; } static int PStream_feof(PStream *p) { return (p->n) <= 0; } static void outcenterIDs( Points* centers, long* centerIDs, char* outfile ) { FILE* fp = fopen(outfile, "w"); if( fp==NULL ) { fprintf(stderr, "error opening %s\n",outfile); exit(1); } int* is_a_median = (int*)calloc( sizeof(int), centers->num ); for( int i =0 ; i< centers->num; i++ ) { is_a_median[centers->p[i].assign] = 1; } for( int i = 0; i < centers->num; i++ ) { if( is_a_median[i] ) { fprintf(fp, "%u\n", centerIDs[i]); fprintf(fp, "%lf\n", centers->p[i].weight); for( int k = 0; k < centers->dim; k++ ) { fprintf(fp, "%lf ", centers->p[i].coord[k]); } fprintf(fp,"\n\n"); } 
} fclose(fp); } static void streamCluster( PStream* stream, long kmin, long kmax, int dim, long chunksize, long centersize, char* outfile ) { block = (float*)malloc( chunksize*dim*sizeof(float) ); float* centerBlock = (float*)malloc(centersize*dim*sizeof(float) ); long* centerIDs = (long*)malloc(centersize*dim*sizeof(long)); if( block == NULL ) { fprintf(stderr,"not enough memory for a chunk!\n"); exit(1); } Points points; points.dim = dim; points.num = chunksize; points.p = (Point *)malloc(chunksize*sizeof(Point)); for( int i = 0; i < chunksize; i++ ) { points.p[i].coord = &block[i*dim]; } Points centers; centers.dim = dim; centers.p = (Point *)malloc(centersize*sizeof(Point)); centers.num = 0; for( int i = 0; i< centersize; i++ ) { centers.p[i].coord = &centerBlock[i*dim]; centers.p[i].weight = 1.0; } long IDoffset = 0; long kfinal; while(1) { size_t numRead = PStream_read(stream,block, dim, chunksize ); fprintf(stderr,"read %d points\n",numRead); if( PStream_ferror(stream) || (numRead < (unsigned int)chunksize && !PStream_feof(stream))) { fprintf(stderr, "error reading data!\n"); exit(1); } points.num = numRead; for( int i = 0; i < points.num; i++ ) { points.p[i].weight = 1.0; } switch_membership = (bool*)malloc(points.num*sizeof(bool)); is_center = (bool*)calloc(points.num,sizeof(bool)); center_table = (int*)malloc(points.num*sizeof(int)); localSearch(&points,kmin, kmax,&kfinal); fprintf(stderr,"finish local search\n"); contcenters(&points); if( kfinal + centers.num > centersize ) { //here we don't handle the situation where # of centers gets too large. fprintf(stderr,"oops! no more space for centers\n"); exit(1); } #ifdef PRINTINFO printf("finish cont center\n"); #endif copycenters(&points, &centers, centerIDs, IDoffset); IDoffset += numRead; #ifdef PRINTINFO printf("finish copy centers\n"); #endif free(is_center); free(switch_membership); free(center_table); if( PStream_feof(stream) ) { break; } } //finally cluster all temp centers switch_membership = (bool*)malloc(centers.num*sizeof(bool)); is_center = (bool*)calloc(centers.num,sizeof(bool)); center_table = (int*)malloc(centers.num*sizeof(int)); localSearch( &centers, kmin, kmax ,&kfinal ); contcenters(&centers); outcenterIDs( &centers, centerIDs, outfile); } int test_omp_streamcluster(int numt) { char *outfilename = newa(char,MAXNAMESIZE); char *infilename = newa(char,MAXNAMESIZE); long kmin, kmax, n, chunksize, clustersize; int dim; int numthreads; c = 0; d = 0; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_streamcluster); #endif // configured as per run script kmin = 10; kmax = 20; dim = 256; n = 65536; chunksize = 65536; clustersize = 1000; strcpy(infilename, "none"); strcpy(outfilename, "output.txt"); nproc = numt; nk_omp_thread_init(); ompthreads = nproc; nproc = 1; omp_set_num_threads(ompthreads); srand48(SEED); PStream* stream; if( n > 0 ) { stream = PStream_new(n); } else { fprintf(stderr,"File Streams not supported\n"); } double t1 = gettime(); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif streamCluster(stream, kmin, kmax, dim, chunksize, clustersize, outfilename ); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif double t2 = gettime(); printf("time = %lf\n",t2-t1); del(stream); printf("time pgain = %lf\n", time_gain); 
printf("time pgain_dist = %lf\n", time_gain_dist); printf("time pgain_init = %lf\n", time_gain_init); printf("time pselect = %lf\n", time_select_feasible); printf("time pspeedy = %lf\n", time_speedy); printf("time pshuffle = %lf\n", time_shuffle); printf("time localSearch = %lf\n", time_local_search); printf("loops=%d\n", d); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif nk_omp_thread_deinit(); return 0; }
GB_binop__le_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__le_int8) // A.*B function (eWiseMult): GB (_AemultB_03__le_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int8) // A*D function (colscale): GB (_AxD__le_int8) // D*A function (rowscale): GB (_DxB__le_int8) // C+=B function (dense accum): GB (_Cdense_accumB__le_int8) // C+=b function (dense accum): GB (_Cdense_accumb__le_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int8) // C=scalar+B GB (_bind1st__le_int8) // C=scalar+B' GB (_bind1st_tran__le_int8) // C=A+scalar GB (_bind2nd__le_int8) // C=A'+scalar GB (_bind2nd_tran__le_int8) // C type: bool // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_INT8 || GxB_NO_LE_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__le_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__le_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
*Cx = (bool *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
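/*
 * A usage sketch (an assumption for illustration, based on the GraphBLAS C
 * API v1.3 apply-with-bound-scalar entry points): the _bind2nd kernel above
 * is what SuiteSparse dispatches to for an elementwise C = (A <= 5) on an
 * int8 matrix.  Error checking is omitted for brevity.
 */
#include "GraphBLAS.h"

GrB_Info le_scalar_example (GrB_Matrix *C, GrB_Matrix A)
{
    GrB_Index nrows, ncols ;
    GrB_Matrix_nrows (&nrows, A) ;
    GrB_Matrix_ncols (&ncols, A) ;
    GrB_Matrix_new (C, GrB_BOOL, nrows, ncols) ;
    // C (i,j) = (A (i,j) <= 5), computed by GB (_bind2nd__le_int8)
    return (GrB_Matrix_apply_BinaryOp2nd_INT8 (*C, NULL, NULL, GrB_LE_INT8,
        A, (int8_t) 5, NULL)) ;
}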
GB_subassign_11.c
//------------------------------------------------------------------------------ // GB_subassign_11: C(I,J)<M,repl> += scalar ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Method 11: C(I,J)<M,repl> += scalar ; using S // M: present // Mask_comp: false // C_replace: true // accum: present // A: scalar // S: constructed #define GB_FREE_WORK GB_FREE_TWO_SLICE #include "GB_subassign_methods.h" GrB_Info GB_subassign_11 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_Matrix M, const bool Mask_struct, const GrB_BinaryOp accum, const void *scalar, const GrB_Type atype, const GrB_Matrix S, GB_Context Context ) { //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_GET_C ; // GB_GET_MASK ; const int64_t *GB_RESTRICT Mp = M->p ; // const int64_t *GB_RESTRICT Mh = M->h ; const int64_t *GB_RESTRICT Mi = M->i ; const GB_void *GB_RESTRICT Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ; const size_t msize = M->type->size ; GB_GET_ACCUM_SCALAR ; GB_GET_S ; //-------------------------------------------------------------------------- // Method 11: C(I,J)<M,repl> += scalar ; using S //-------------------------------------------------------------------------- // Time: Optimal. All entries in M+S must be examined. All entries in S // are modified: if M(i,j)=1 then S(i,j) is used to write to the // corresponding entry in C. If M(i,j) is not present, or zero, then the // entry in C is cleared (because of C_replace). If S(i,j) is not present, // and M(i,j)=1, then the scalar is inserted into C. The only case that // can be skipped is if neither S nor M is present. As a result, this // method need not traverse all of IxJ. It can limit its traversal to the // pattern of M+S. // Method 09 and Method 11 are very similar. //-------------------------------------------------------------------------- // Parallel: Z=M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20) //-------------------------------------------------------------------------- GB_SUBASSIGN_TWO_SLICE (M, S) ; //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_TASK_DESCRIPTOR_PHASE1 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get S(:,j) and M(:,j) //------------------------------------------------------------------ int64_t j = (Zh == NULL) ? 
k : Zh [k] ; GB_GET_MAPPED_VECTOR (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X) ; GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ; //------------------------------------------------------------------ // do a 2-way merge of S(:,j) and M(:,j) //------------------------------------------------------------------ // jC = J [j] ; or J is a colon expression // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and M (:,j) have entries while (pS < pS_end && pM < pM_end) { int64_t iS = Si [pS] ; int64_t iM = Mi [pM] ; if (iS < iM) { // S (i,j) is present but M (i,j) is not // ----[C A 0] or [X A 0]----------------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): becomes zombie GB_C_S_LOOKUP ; GB_DELETE_ENTRY ; GB_NEXT (S) ; } else if (iM < iS) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]------------------------------------------ // [. A 1]: action: ( insert ) task_pending++ ; } GB_NEXT (M) ; } else { // both S (i,j) and M (i,j) present GB_C_S_LOOKUP ; if (GB_mcast (Mx, pM, msize)) { // ----[C A 1] or [X A 1]------------------------------- // [C A 1]: action: ( =C+A ): apply accum // [X A 1]: action: ( undelete ): zombie lives GB_withaccum_C_A_1_scalar ; } else { // ----[C A 0] or [X A 0]------------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): becomes zombie GB_DELETE_ENTRY ; } GB_NEXT (S) ; GB_NEXT (M) ; } } // while list S (:,j) has entries. List M (:,j) exhausted while (pS < pS_end) { // S (i,j) is present but M (i,j) is not // ----[C A 0] or [X A 0]----------------------------------- // [X A 0]: action: ( X ): still a zombie // [C A 0]: C_repl: action: ( delete ): becomes zombie GB_C_S_LOOKUP ; GB_DELETE_ENTRY ; GB_NEXT (S) ; } // while list M (:,j) has entries. List S (:,j) exhausted while (pM < pM_end) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]------------------------------------------ // [. A 1]: action: ( insert ) task_pending++ ; } GB_NEXT (M) ; } } GB_PHASE1_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_TASK_DESCRIPTOR_PHASE2 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get S(:,j) and M(:,j) //------------------------------------------------------------------ int64_t j = (Zh == NULL) ? 
k : Zh [k] ; GB_GET_MAPPED_VECTOR (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X) ; GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ; //------------------------------------------------------------------ // do a 2-way merge of S(:,j) and M(:,j) //------------------------------------------------------------------ // jC = J [j] ; or J is a colon expression int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ; // while both list S (:,j) and M (:,j) have entries while (pS < pS_end && pM < pM_end) { int64_t iS = Si [pS] ; int64_t iM = Mi [pM] ; if (iS < iM) { // S (i,j) is present but M (i,j) is not GB_NEXT (S) ; } else if (iM < iS) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]------------------------------------------ // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ; GB_PENDING_INSERT (scalar) ; } GB_NEXT (M) ; } else { // both S (i,j) and M (i,j) present GB_NEXT (S) ; GB_NEXT (M) ; } } // while list M (:,j) has entries. List S (:,j) exhausted while (pM < pM_end) { // S (i,j) is not present, M (i,j) is present if (GB_mcast (Mx, pM, msize)) { // ----[. A 1]------------------------------------------ // [. A 1]: action: ( insert ) int64_t iM = Mi [pM] ; int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ; GB_PENDING_INSERT (scalar) ; } GB_NEXT (M) ; } } GB_PHASE2_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
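The heart of Method 11 above is a 2-way merge over the sorted row-index lists of S(:,j) and M(:,j): each position falls into "S only", "M only", or "both", and that classification drives the delete / insert / accumulate actions. A minimal sketch of that merge, with the GB_* actions reduced to prints and all names hypothetical:

#include <stdint.h>
#include <stdio.h>

/* classify each row index as present only in S, only in M, or in both */
static void merge_S_M (const int64_t *Si, int64_t snz,
                       const int64_t *Mi, int64_t mnz)
{
    int64_t pS = 0, pM = 0 ;
    while (pS < snz && pM < mnz)
    {
        if      (Si [pS] < Mi [pM]) printf ("S only: %lld\n", (long long) Si [pS++]) ;
        else if (Mi [pM] < Si [pS]) printf ("M only: %lld\n", (long long) Mi [pM++]) ;
        else { printf ("both  : %lld\n", (long long) Si [pS++]) ; pM++ ; }
    }
    while (pS < snz) printf ("S only: %lld\n", (long long) Si [pS++]) ;  /* M exhausted */
    while (pM < mnz) printf ("M only: %lld\n", (long long) Mi [pM++]) ;  /* S exhausted */
}

int main (void)
{
    int64_t Si [3] = { 0, 2, 5 } ;
    int64_t Mi [3] = { 2, 3, 7 } ;
    merge_S_M (Si, 3, Mi, 3) ;
    return 0 ;
}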
linAlgAXPBY.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void axpby(const dlong & N, const dlong & xOffset, const dlong& yOffset, const dfloat & alpha, const dfloat * __restrict__ cpu_a, const dfloat &beta, dfloat * __restrict__ cpu_b){ #pragma omp parallel for for(dlong i=0;i<N;++i){ const dfloat ai = cpu_a[i + xOffset]; const dfloat bi = cpu_b[i + yOffset]; cpu_b[i + yOffset] = alpha*ai + beta*bi; } } extern "C" void axpbyMany(const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat & alpha, const dfloat * __restrict__ cpu_a, const dfloat & beta, dfloat * __restrict__ cpu_b){ #pragma omp parallel for collapse(2) for(int fld=0;fld<Nfields;fld++) { for(dlong i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat bi = cpu_b[id]; cpu_b[id] = alpha*ai + beta*bi; } } }
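The axpby kernel above computes b <- alpha*a + beta*b over N entries with per-vector offsets. A plain-C driver showing the same update on a small vector (the kernel itself takes its scalar arguments by C++ reference and uses the dlong/dfloat typedefs from its build; int and double stand in for them in this sketch):

#include <stdio.h>

int main (void)
{
    double a [4] = { 1, 2, 3, 4 } ;
    double b [4] = { 10, 20, 30, 40 } ;
    const double alpha = 2.0, beta = 0.5 ;
    #pragma omp parallel for
    for (int i = 0 ; i < 4 ; i++)
        b [i] = alpha * a [i] + beta * b [i] ;       /* same update as axpby */
    printf ("%g %g %g %g\n", b [0], b [1], b [2], b [3]) ;  /* 7 14 21 28 */
    return 0 ;
}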
reduction-task-2.c
int v; extern void foo (int); void bar (void) { int i; #pragma omp for reduction (task, +: v) nowait /* { dg-error "'task' reduction modifier on a construct with a 'nowait' clause" } */ for (i = 0; i < 64; i++) foo (i); #pragma omp sections nowait reduction (task, +: v) /* { dg-error "'task' reduction modifier on a construct with a 'nowait' clause" } */ { foo (-2); #pragma omp section foo (-3); } #pragma omp simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */ for (i = 0; i < 64; i++) v++; #pragma omp for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp parallel for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp teams distribute parallel for simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp taskloop reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */ for (i = 0; i < 64; i++) foo (i); #pragma omp taskloop simd reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct combined with 'simd'" } */ for (i = 0; i < 64; i++) v++; #pragma omp teams reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */ foo (i); #pragma omp teams distribute reduction (task, +: v) /* { dg-error "invalid 'task' reduction modifier on construct not combined with 'parallel', 'for' or 'sections'" } */ for (i = 0; i < 64; i++) foo (i); }
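Every directive in the test above is expected to be rejected. For contrast, a minimal sketch of a form OpenMP 5.0 does accept: the 'task' reduction modifier on a worksharing loop without 'nowait', with participating tasks naming the same variable via in_reduction (this assumes a compiler with task-reduction support; the function name ok is hypothetical):

int v;

void ok (void)
{
  int i;
  #pragma omp parallel
  #pragma omp for reduction (task, +: v)   /* no 'nowait': this form is legal */
  for (i = 0; i < 64; i++)
    {
      #pragma omp task in_reduction (+: v) /* task participates in the reduction */
      v++;
    }
}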
single3.c
/* Exercise the OpenMP 'single' construct, with and without copyprivate. */
#include <stdio.h>
#include <omp.h>
#include <assert.h>

int a;
int b;

int main(void)
{
  /* a and b must be private in the enclosing context for copyprivate. */
  #pragma omp parallel private(a,b)
  {
    #pragma omp single
    {
      /* Executed by exactly one thread; the others wait at the
         implicit barrier at the end of the construct. */
      printf("single executed by thread %d\n", omp_get_thread_num());
    }
    /* copyprivate may not be combined with nowait. */
    #pragma omp single copyprivate(a,b)
    {
      a = 2;
      b = 3;
    }
    /* The values assigned above are broadcast to every thread's copy. */
    assert(a == 2 && b == 3);
  }
  return 0;
}
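A second sketch of what copyprivate provides: the value assigned by the one executing thread is broadcast to every other thread's private copy before any thread leaves the construct (seed is private here because it is declared inside the parallel region):

#include <assert.h>

int main (void)
{
  #pragma omp parallel
  {
    int seed ;                        /* automatic in the region: private */
    #pragma omp single copyprivate(seed)
    {
      seed = 42 ;                     /* one thread produces the value */
    }
    assert (seed == 42) ;             /* broadcast to every thread's copy */
  }
  return 0 ;
}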
GB_unaryop__minv_uint16_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint16_fp32 // op(A') function: GB_tran__minv_uint16_fp32 // C type: uint16_t // A type: float // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = GB_IMINV_UNSIGNED (aij, 16) #define GB_ATYPE \ float #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 16) ; // casting #define GB_CASTING(z, x) \ uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint16_fp32 ( uint16_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint16_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
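The apply loop above evaluates cij = op(cast(aij)) per entry. A stand-alone sketch of that cast-then-op pattern follows; cast_u16 and iminv_u16 are assumed stand-ins, since the exact NaN, clamping, and divide-by-zero semantics of GB_CAST_UNSIGNED and GB_IMINV_UNSIGNED are defined by the GraphBLAS headers and are not reproduced here.

#include <stdint.h>
#include <stdio.h>
#include <math.h>

static uint16_t cast_u16 (float x)      /* assumed clamp-to-range cast */
{
    if (isnan (x) || x <= 0) return (0) ;
    if (x >= 65535.0f) return (65535) ;
    return ((uint16_t) x) ;
}

static uint16_t iminv_u16 (uint16_t x)  /* assumed guarded integer 1/x */
{
    return (x == 0 ? UINT16_MAX : (uint16_t) (1 / x)) ;
}

static void apply_minv (uint16_t *Cx, const float *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
        Cx [p] = iminv_u16 (cast_u16 (Ax [p])) ;   /* op(cast(aij)) */
}

int main (void)
{
    float Ax [3] = { 0.5f, 1.0f, 70000.0f } ;
    uint16_t Cx [3] ;
    apply_minv (Cx, Ax, 3) ;
    printf ("%d %d %d\n", Cx [0], Cx [1], Cx [2]) ;  /* 65535 1 0 under these assumptions */
    return 0 ;
}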
GB_binop__iseq_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint8) // A*D function (colscale): GB (_AxD__iseq_uint8) // D*A function (rowscale): GB (_DxB__iseq_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint8) // C=scalar+B GB (_bind1st__iseq_uint8) // C=scalar+B' GB (_bind1st_tran__iseq_uint8) // C=A+scalar GB (_bind2nd__iseq_uint8) // C=A'+scalar GB (_bind2nd_tran__iseq_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_UINT8 || GxB_NO_ISEQ_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__iseq_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
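One convention worth noting in the file above: unlike the EQ operator family, whose result type is bool, the ISEQ family stores the comparison result in the operand type, so GB_CTYPE here is uint8_t holding 0 or 1. A small stand-alone illustration of that convention:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    uint8_t a [3] = { 5, 7, 9 } ;
    uint8_t b [3] = { 5, 8, 9 } ;
    uint8_t c [3] ;
    for (int p = 0 ; p < 3 ; p++)
        c [p] = (a [p] == b [p]) ;               /* z = (x == y), kept as uint8_t */
    printf ("%d %d %d\n", c [0], c [1], c [2]) ; /* prints: 1 0 1 */
    return 0 ;
}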